blob: 05d9d5a9dc28c61846f6ec1cd4d0ba04ffe3de5f [file] [log] [blame]
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001/*
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02002 * drivers/base/power/runtime.c - Helper functions for device runtime PM
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02003 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
Alan Stern1bfee5b2010-09-25 23:35:00 +02005 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02006 *
7 * This file is released under the GPLv2.
8 */
9
Ingo Molnar5b3cc152017-02-02 20:43:54 +010010#include <linux/sched/mm.h>
Paul Gortmaker1b6bc322011-05-27 07:12:15 -040011#include <linux/export.h>
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020012#include <linux/pm_runtime.h>
Tony Lindgren4990d4f2015-05-18 15:40:29 -070013#include <linux/pm_wakeirq.h>
Ming Leic3dc2f12011-09-27 22:54:41 +020014#include <trace/events/rpm.h>
Rafael J. Wysocki21d5c572016-10-30 17:32:31 +010015
16#include "../base.h"
Alan Stern7490e442010-09-25 23:35:15 +020017#include "power.h"
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020018
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +020019typedef int (*pm_callback_t)(struct device *);
Ulf Hansson5f59df72014-03-01 11:56:04 +010020
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +020021static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
Ulf Hansson5f59df72014-03-01 11:56:04 +010022{
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +020023 pm_callback_t cb;
24 const struct dev_pm_ops *ops;
25
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
34 else
35 ops = NULL;
36
37 if (ops)
38 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39 else
40 cb = NULL;
41
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45 return cb;
Ulf Hansson5f59df72014-03-01 11:56:04 +010046}
47
/* Fetch the named runtime PM callback (e.g. runtime_suspend) for @dev. */
#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020053
/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	/*
	 * The timestamp is advanced even when runtime PM is disabled, so
	 * time spent while disabled is never charged to either counter.
	 */
	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	/* Charge the elapsed interval to the state we are leaving. */
	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
82
/*
 * __update_runtime_status - Change power.runtime_status, first accounting
 * the time spent in the state being left (see update_pm_runtime_accounting).
 */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
88
89/**
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020090 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
91 * @dev: Device to handle.
92 */
93static void pm_runtime_deactivate_timer(struct device *dev)
94{
95 if (dev->power.timer_expires > 0) {
96 del_timer(&dev->power.suspend_timer);
97 dev->power.timer_expires = 0;
98 }
99}
100
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
114
Alan Stern15bcb91d2010-09-25 23:35:21 +0200115/*
116 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
117 * @dev: Device to handle.
118 *
119 * Compute the autosuspend-delay expiration time based on the device's
120 * power.last_busy time. If the delay has already expired or is disabled
121 * (negative) or the power.use_autosuspend flag isn't set, return 0.
122 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
123 *
124 * This function may be called either with or without dev->power.lock held.
125 * Either way it can be racy, since power.last_busy may be updated at any time.
126 */
127unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
128{
129 int autosuspend_delay;
130 long elapsed;
131 unsigned long last_busy;
132 unsigned long expires = 0;
133
134 if (!dev->power.use_autosuspend)
135 goto out;
136
137 autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
138 if (autosuspend_delay < 0)
139 goto out;
140
141 last_busy = ACCESS_ONCE(dev->power.last_busy);
142 elapsed = jiffies - last_busy;
143 if (elapsed < 0)
144 goto out; /* jiffies has wrapped around. */
145
146 /*
147 * If the autosuspend_delay is >= 1 second, align the timer by rounding
148 * up to the nearest second.
149 */
150 expires = last_busy + msecs_to_jiffies(autosuspend_delay);
151 if (autosuspend_delay >= 1000)
152 expires = round_jiffies(expires);
153 expires += !expires;
154 if (elapsed >= expires - last_busy)
155 expires = 0; /* Already expired. */
156
157 out:
158 return expires;
159}
160EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
161
Ming Leie8234072013-02-22 16:34:11 -0800162static int dev_memalloc_noio(struct device *dev, void *data)
163{
164 return dev->power.memalloc_noio;
165}
166
/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device, or network
 * device driver for solving the deadlock problem during runtime
 * resume/suspend:
 *
 * If memory allocation with GFP_KERNEL is called inside runtime
 * resume/suspend callback of any one of its ancestors(or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
 * device becomes active and the involed page I/O finishes. The
 * situation is pointed out first by Alan Stern. Network device
 * are involved in iSCSI kind of situation.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device(block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	/* Walk up the ancestor chain, updating each device's flag. */
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * not need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
231
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200232/**
Alan Stern1bfee5b2010-09-25 23:35:00 +0200233 * rpm_check_suspend_allowed - Test whether a device may be suspended.
234 * @dev: Device to test.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200235 */
Alan Stern1bfee5b2010-09-25 23:35:00 +0200236static int rpm_check_suspend_allowed(struct device *dev)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200237{
238 int retval = 0;
239
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200240 if (dev->power.runtime_error)
241 retval = -EINVAL;
Rafael J. Wysocki632e2702011-07-01 22:29:15 +0200242 else if (dev->power.disable_depth > 0)
243 retval = -EACCES;
244 else if (atomic_read(&dev->power.usage_count) > 0)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200245 retval = -EAGAIN;
Ulf Hansson62006c12016-10-17 20:16:58 +0200246 else if (!dev->power.ignore_children &&
247 atomic_read(&dev->power.child_count))
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200248 retval = -EBUSY;
Alan Stern1bfee5b2010-09-25 23:35:00 +0200249
250 /* Pending resume requests take precedence over suspends. */
251 else if ((dev->power.deferred_resume
Kevin Winchester78ca7c32010-10-29 15:29:55 +0200252 && dev->power.runtime_status == RPM_SUSPENDING)
Alan Stern1bfee5b2010-09-25 23:35:00 +0200253 || (dev->power.request_pending
254 && dev->power.request == RPM_REQ_RESUME))
255 retval = -EAGAIN;
Rafael J. Wysockid5919dc2017-10-31 18:26:15 +0100256 else if (__dev_pm_qos_read_value(dev) < 0)
Rafael J. Wysocki55d7ec42012-08-15 21:32:04 +0200257 retval = -EPERM;
Alan Stern1bfee5b2010-09-25 23:35:00 +0200258 else if (dev->power.runtime_status == RPM_SUSPENDED)
259 retval = 1;
260
261 return retval;
262}
263
/*
 * rpm_get_suppliers - Runtime-resume and reference-count device-link
 * suppliers of @dev that participate in runtime PM.
 *
 * On failure of any supplier resume, the reference just taken is dropped
 * and the error is returned; suppliers already activated keep their
 * rpm_active marking (the caller is expected to undo them via
 * rpm_put_suppliers()).
 */
static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		/* Only links managed for runtime PM are relevant here. */
		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		/* Skip links being torn down or already counted. */
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}
288
289static void rpm_put_suppliers(struct device *dev)
290{
291 struct device_link *link;
292
293 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
294 if (link->rpm_active &&
295 READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
296 pm_runtime_put(link->supplier);
297 link->rpm_active = false;
298 }
299}
300
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 *
 * Called under dev->power.lock, which is dropped around the callback
 * invocation and re-acquired before returning.  When the device is not
 * irq-safe, supplier devices are runtime-resumed before a resume callback
 * and released again after a successful suspend (or a failed resume).
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			/* Failure here jumps into the cleanup block below. */
			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

			/*
			 * The "fail" target lands here with the read lock
			 * already held by the rpm_get_suppliers() error path.
			 */
 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}
363
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another idle notification has been started earlier, return immediately. If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	/* No callbacks means nothing to run; go straight to rpm_suspend(). */
	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	/* A zero retval from ->runtime_idle() means "go ahead and suspend". */
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
436
437/**
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200438 * rpm_callback - Run a given runtime PM callback for a given device.
439 * @cb: Runtime PM callback to run.
440 * @dev: Device to run the callback for.
441 */
442static int rpm_callback(int (*cb)(struct device *), struct device *dev)
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200443{
444 int retval;
445
446 if (!cb)
447 return -ENOSYS;
448
Ming Leidb881752013-02-22 16:34:19 -0800449 if (dev->power.memalloc_noio) {
450 unsigned int noio_flag;
451
452 /*
453 * Deadlock might be caused if memory allocation with
454 * GFP_KERNEL happens inside runtime_suspend and
455 * runtime_resume callbacks of one block device's
456 * ancestor or the block device itself. Network
457 * device might be thought as part of iSCSI block
458 * device, so network device and its ancestor should
459 * be marked as memalloc_noio too.
460 */
461 noio_flag = memalloc_noio_save();
462 retval = __rpm_callback(cb, dev);
463 memalloc_noio_restore(noio_flag);
464 } else {
465 retval = __rpm_callback(cb, dev);
466 }
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200467
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200468 dev->power.runtime_error = retval;
Rafael J. Wysocki632e2702011-07-01 22:29:15 +0200469 return retval != -EACCES ? retval : -EIO;
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200470}
471
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* irq-safe devices cannot sleep, so spin instead of waiting. */
		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

	dev_pm_enable_wake_irq_complete(dev);

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	/*
	 * A resume requested while the callback was running takes
	 * precedence: carry it out now and report -EAGAIN.
	 */
	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	/* The suspend callback failed: roll the status back to RPM_ACTIVE. */
	dev_pm_disable_wake_irq_check(dev, true);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
659
660/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200661 * rpm_resume - Carry out runtime resume of given device.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200662 * @dev: Device to resume.
Alan Stern3f9af052010-09-25 23:34:54 +0200663 * @rpmflags: Flag bits.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200664 *
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200665 * Check if the device's runtime PM status allows it to be resumed. Cancel
Alan Stern1bfee5b2010-09-25 23:35:00 +0200666 * any scheduled or pending requests. If another resume has been started
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300667 * earlier, either return immediately or wait for it to finish, depending on the
Alan Stern1bfee5b2010-09-25 23:35:00 +0200668 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
669 * parallel with this function, either tell the other process to resume after
670 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
671 * flag is set then queue a resume request; otherwise run the
672 * ->runtime_resume() callback directly. Queue an idle notification for the
673 * device if the resume succeeded.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200674 *
675 * This function must be called under dev->power.lock with interrupts disabled.
676 */
Alan Stern140a6c92010-09-25 23:35:07 +0200677static int rpm_resume(struct device *dev, int rpmflags)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200678 __releases(&dev->power.lock) __acquires(&dev->power.lock)
679{
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200680 int (*callback)(struct device *);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200681 struct device *parent = NULL;
682 int retval = 0;
683
Paul E. McKenneyd44c9502016-04-26 13:38:55 -0700684 trace_rpm_resume_rcuidle(dev, rpmflags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200685
686 repeat:
Alan Stern1bfee5b2010-09-25 23:35:00 +0200687 if (dev->power.runtime_error)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200688 retval = -EINVAL;
Kevin Hilman6f3c77b2012-09-21 22:47:34 +0000689 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
690 && dev->power.runtime_status == RPM_ACTIVE)
691 retval = 1;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200692 else if (dev->power.disable_depth > 0)
Rafael J. Wysocki632e2702011-07-01 22:29:15 +0200693 retval = -EACCES;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200694 if (retval)
695 goto out;
696
Alan Stern15bcb91d2010-09-25 23:35:21 +0200697 /*
698 * Other scheduled or pending requests need to be canceled. Small
699 * optimization: If an autosuspend timer is running, leave it running
700 * rather than cancelling it now only to restart it again in the near
701 * future.
702 */
703 dev->power.request = RPM_REQ_NONE;
704 if (!dev->power.timer_autosuspends)
705 pm_runtime_deactivate_timer(dev);
Alan Stern1bfee5b2010-09-25 23:35:00 +0200706
707 if (dev->power.runtime_status == RPM_ACTIVE) {
708 retval = 1;
709 goto out;
710 }
711
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200712 if (dev->power.runtime_status == RPM_RESUMING
713 || dev->power.runtime_status == RPM_SUSPENDING) {
714 DEFINE_WAIT(wait);
715
Alan Stern1bfee5b2010-09-25 23:35:00 +0200716 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200717 if (dev->power.runtime_status == RPM_SUSPENDING)
718 dev->power.deferred_resume = true;
Alan Stern1bfee5b2010-09-25 23:35:00 +0200719 else
720 retval = -EINPROGRESS;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200721 goto out;
722 }
723
Rafael J. Wysockiad3c36a2011-09-27 21:54:52 +0200724 if (dev->power.irq_safe) {
725 spin_unlock(&dev->power.lock);
726
727 cpu_relax();
728
729 spin_lock(&dev->power.lock);
730 goto repeat;
731 }
732
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200733 /* Wait for the operation carried out in parallel with us. */
734 for (;;) {
735 prepare_to_wait(&dev->power.wait_queue, &wait,
736 TASK_UNINTERRUPTIBLE);
737 if (dev->power.runtime_status != RPM_RESUMING
738 && dev->power.runtime_status != RPM_SUSPENDING)
739 break;
740
741 spin_unlock_irq(&dev->power.lock);
742
743 schedule();
744
745 spin_lock_irq(&dev->power.lock);
746 }
747 finish_wait(&dev->power.wait_queue, &wait);
748 goto repeat;
749 }
750
Alan Stern7490e442010-09-25 23:35:15 +0200751 /*
752 * See if we can skip waking up the parent. This is safe only if
753 * power.no_callbacks is set, because otherwise we don't know whether
754 * the resume will actually succeed.
755 */
756 if (dev->power.no_callbacks && !parent && dev->parent) {
Ming Leid63be5f2010-10-22 23:48:14 +0200757 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
Alan Stern7490e442010-09-25 23:35:15 +0200758 if (dev->parent->power.disable_depth > 0
759 || dev->parent->power.ignore_children
760 || dev->parent->power.runtime_status == RPM_ACTIVE) {
761 atomic_inc(&dev->parent->power.child_count);
762 spin_unlock(&dev->parent->power.lock);
Rafael J. Wysocki7f321c22012-08-15 21:31:45 +0200763 retval = 1;
Alan Stern7490e442010-09-25 23:35:15 +0200764 goto no_callback; /* Assume success. */
765 }
766 spin_unlock(&dev->parent->power.lock);
767 }
768
Alan Stern1bfee5b2010-09-25 23:35:00 +0200769 /* Carry out an asynchronous or a synchronous resume. */
770 if (rpmflags & RPM_ASYNC) {
771 dev->power.request = RPM_REQ_RESUME;
772 if (!dev->power.request_pending) {
773 dev->power.request_pending = true;
774 queue_work(pm_wq, &dev->power.work);
775 }
776 retval = 0;
777 goto out;
778 }
779
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200780 if (!parent && dev->parent) {
781 /*
Alan Sternc7b61de2010-12-01 00:14:42 +0100782 * Increment the parent's usage counter and resume it if
783 * necessary. Not needed if dev is irq-safe; then the
784 * parent is permanently resumed.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200785 */
786 parent = dev->parent;
Alan Sternc7b61de2010-12-01 00:14:42 +0100787 if (dev->power.irq_safe)
788 goto skip_parent;
Alan Stern862f89b2009-11-25 01:06:37 +0100789 spin_unlock(&dev->power.lock);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200790
791 pm_runtime_get_noresume(parent);
792
Alan Stern862f89b2009-11-25 01:06:37 +0100793 spin_lock(&parent->power.lock);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200794 /*
Ulf Hansson216ef0b2016-10-17 20:16:59 +0200795 * Resume the parent if it has runtime PM enabled and not been
796 * set to ignore its children.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200797 */
798 if (!parent->power.disable_depth
799 && !parent->power.ignore_children) {
Alan Stern140a6c92010-09-25 23:35:07 +0200800 rpm_resume(parent, 0);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200801 if (parent->power.runtime_status != RPM_ACTIVE)
802 retval = -EBUSY;
803 }
Alan Stern862f89b2009-11-25 01:06:37 +0100804 spin_unlock(&parent->power.lock);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200805
Alan Stern862f89b2009-11-25 01:06:37 +0100806 spin_lock(&dev->power.lock);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200807 if (retval)
808 goto out;
809 goto repeat;
810 }
Alan Sternc7b61de2010-12-01 00:14:42 +0100811 skip_parent:
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200812
Alan Stern7490e442010-09-25 23:35:15 +0200813 if (dev->power.no_callbacks)
814 goto no_callback; /* Assume success. */
815
Arjan van de Ven8d4b9d12010-07-19 02:01:06 +0200816 __update_runtime_status(dev, RPM_RESUMING);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200817
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +0200818 callback = RPM_GET_CALLBACK(dev, runtime_resume);
Rafael J. Wysocki35cd1332011-12-18 00:34:13 +0100819
Chunfeng Yunb54ef492024-04-17 12:24:53 -0400820 dev_pm_disable_wake_irq_check(dev, false);
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200821 retval = rpm_callback(callback, dev);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200822 if (retval) {
Arjan van de Ven8d4b9d12010-07-19 02:01:06 +0200823 __update_runtime_status(dev, RPM_SUSPENDED);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200824 pm_runtime_cancel_pending(dev);
Tony Lindgrenbed57032016-12-05 16:38:16 -0800825 dev_pm_enable_wake_irq_check(dev, false);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200826 } else {
Alan Stern7490e442010-09-25 23:35:15 +0200827 no_callback:
Arjan van de Ven8d4b9d12010-07-19 02:01:06 +0200828 __update_runtime_status(dev, RPM_ACTIVE);
Tony Lindgren56f487c2015-05-13 16:36:32 -0700829 pm_runtime_mark_last_busy(dev);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200830 if (parent)
831 atomic_inc(&parent->power.child_count);
832 }
833 wake_up_all(&dev->power.wait_queue);
834
Rafael J. Wysocki7f321c22012-08-15 21:31:45 +0200835 if (retval >= 0)
Alan Stern140a6c92010-09-25 23:35:07 +0200836 rpm_idle(dev, RPM_ASYNC);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200837
838 out:
Alan Sternc7b61de2010-12-01 00:14:42 +0100839 if (parent && !dev->power.irq_safe) {
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200840 spin_unlock_irq(&dev->power.lock);
841
842 pm_runtime_put(parent);
843
844 spin_lock_irq(&dev->power.lock);
845 }
846
Paul E. McKenneyd44c9502016-04-26 13:38:55 -0700847 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200848
849 return retval;
850}
851
852/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200853 * pm_runtime_work - Universal runtime PM work function.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200854 * @work: Work structure used for scheduling the execution of this function.
855 *
856 * Use @work to get the device object the work is to be done for, determine what
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200857 * is to be done and execute the appropriate runtime PM function.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200858 */
859static void pm_runtime_work(struct work_struct *work)
860{
861 struct device *dev = container_of(work, struct device, power.work);
862 enum rpm_request req;
863
864 spin_lock_irq(&dev->power.lock);
865
866 if (!dev->power.request_pending)
867 goto out;
868
869 req = dev->power.request;
870 dev->power.request = RPM_REQ_NONE;
871 dev->power.request_pending = false;
872
873 switch (req) {
874 case RPM_REQ_NONE:
875 break;
876 case RPM_REQ_IDLE:
Alan Stern140a6c92010-09-25 23:35:07 +0200877 rpm_idle(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200878 break;
879 case RPM_REQ_SUSPEND:
Alan Stern140a6c92010-09-25 23:35:07 +0200880 rpm_suspend(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200881 break;
Alan Stern15bcb91d2010-09-25 23:35:21 +0200882 case RPM_REQ_AUTOSUSPEND:
883 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
884 break;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200885 case RPM_REQ_RESUME:
Alan Stern140a6c92010-09-25 23:35:07 +0200886 rpm_resume(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200887 break;
888 }
889
890 out:
891 spin_unlock_irq(&dev->power.lock);
892}
893
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * 'expires' == 0 means the timer has been deactivated meanwhile, and
	 * if 'expires' is still after 'jiffies' we have been called too early;
	 * in both cases do nothing.  Otherwise clear the expiration (marking
	 * the timer inactive) and submit an async suspend request.
	 */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
918
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 *
 * A zero @delay submits an asynchronous suspend request right away instead of
 * arming the timer.  Returns 0 on success or a negative error code from
 * rpm_check_suspend_allowed()/rpm_suspend().
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/*
	 * timer_expires == 0 means "timer inactive" (see pm_suspend_timer_fn),
	 * so bump it to 1 in the unlikely case the sum wrapped to exactly 0.
	 */
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
954
955/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200956 * __pm_runtime_idle - Entry point for runtime idle operations.
Alan Stern140a6c92010-09-25 23:35:07 +0200957 * @dev: Device to send idle notification for.
958 * @rpmflags: Flag bits.
959 *
960 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
961 * return immediately if it is larger than zero. Then carry out an idle
962 * notification, either synchronous or asynchronous.
963 *
Colin Cross311aab72011-08-08 23:39:36 +0200964 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
965 * or if pm_runtime_irq_safe() has been called.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200966 */
Alan Stern140a6c92010-09-25 23:35:07 +0200967int __pm_runtime_idle(struct device *dev, int rpmflags)
968{
969 unsigned long flags;
970 int retval;
971
972 if (rpmflags & RPM_GET_PUT) {
973 if (!atomic_dec_and_test(&dev->power.usage_count))
974 return 0;
975 }
976
Rafael J. Wysockia9306a62017-02-04 00:44:36 +0100977 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
978
Alan Stern140a6c92010-09-25 23:35:07 +0200979 spin_lock_irqsave(&dev->power.lock, flags);
980 retval = rpm_idle(dev, rpmflags);
981 spin_unlock_irqrestore(&dev->power.lock, flags);
982
983 return retval;
984}
985EXPORT_SYMBOL_GPL(__pm_runtime_idle);
986
987/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200988 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
Alan Stern140a6c92010-09-25 23:35:07 +0200989 * @dev: Device to suspend.
990 * @rpmflags: Flag bits.
991 *
Alan Stern15bcb91d2010-09-25 23:35:21 +0200992 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
993 * return immediately if it is larger than zero. Then carry out a suspend,
994 * either synchronous or asynchronous.
Alan Stern140a6c92010-09-25 23:35:07 +0200995 *
Colin Cross311aab72011-08-08 23:39:36 +0200996 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
997 * or if pm_runtime_irq_safe() has been called.
Alan Stern140a6c92010-09-25 23:35:07 +0200998 */
999int __pm_runtime_suspend(struct device *dev, int rpmflags)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001000{
1001 unsigned long flags;
1002 int retval;
1003
Alan Stern15bcb91d2010-09-25 23:35:21 +02001004 if (rpmflags & RPM_GET_PUT) {
1005 if (!atomic_dec_and_test(&dev->power.usage_count))
1006 return 0;
1007 }
1008
Rafael J. Wysockia9306a62017-02-04 00:44:36 +01001009 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1010
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001011 spin_lock_irqsave(&dev->power.lock, flags);
Alan Stern140a6c92010-09-25 23:35:07 +02001012 retval = rpm_suspend(dev, rpmflags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001013 spin_unlock_irqrestore(&dev->power.lock, flags);
1014
1015 return retval;
1016}
Alan Stern140a6c92010-09-25 23:35:07 +02001017EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001018
1019/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001020 * __pm_runtime_resume - Entry point for runtime resume operations.
Alan Stern140a6c92010-09-25 23:35:07 +02001021 * @dev: Device to resume.
Alan Stern3f9af052010-09-25 23:34:54 +02001022 * @rpmflags: Flag bits.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001023 *
Alan Stern140a6c92010-09-25 23:35:07 +02001024 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
1025 * carry out a resume, either synchronous or asynchronous.
1026 *
Colin Cross311aab72011-08-08 23:39:36 +02001027 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1028 * or if pm_runtime_irq_safe() has been called.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001029 */
Alan Stern140a6c92010-09-25 23:35:07 +02001030int __pm_runtime_resume(struct device *dev, int rpmflags)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001031{
Alan Stern140a6c92010-09-25 23:35:07 +02001032 unsigned long flags;
Alan Stern1d531c12009-12-13 20:28:30 +01001033 int retval;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001034
Rafael J. Wysockia9306a62017-02-04 00:44:36 +01001035 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1036 dev->power.runtime_status != RPM_ACTIVE);
Colin Cross311aab72011-08-08 23:39:36 +02001037
Alan Stern140a6c92010-09-25 23:35:07 +02001038 if (rpmflags & RPM_GET_PUT)
1039 atomic_inc(&dev->power.usage_count);
1040
1041 spin_lock_irqsave(&dev->power.lock, flags);
1042 retval = rpm_resume(dev, rpmflags);
1043 spin_unlock_irqrestore(&dev->power.lock, flags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001044
1045 return retval;
1046}
Alan Stern140a6c92010-09-25 23:35:07 +02001047EXPORT_SYMBOL_GPL(__pm_runtime_resume);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001048
1049/**
Rafael J. Wysockia436b6a2015-12-17 02:54:26 +01001050 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1051 * @dev: Device to handle.
1052 *
1053 * Return -EINVAL if runtime PM is disabled for the device.
1054 *
1055 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1056 * and the runtime PM usage counter is nonzero, increment the counter and
1057 * return 1. Otherwise return 0 without changing the counter.
1058 */
1059int pm_runtime_get_if_in_use(struct device *dev)
1060{
1061 unsigned long flags;
1062 int retval;
1063
1064 spin_lock_irqsave(&dev->power.lock, flags);
1065 retval = dev->power.disable_depth > 0 ? -EINVAL :
1066 dev->power.runtime_status == RPM_ACTIVE
1067 && atomic_inc_not_zero(&dev->power.usage_count);
1068 spin_unlock_irqrestore(&dev->power.lock, flags);
1069 return retval;
1070}
1071EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1072
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * Direct status changes are only allowed while runtime PM is disabled
	 * or after a callback failure has been recorded.
	 */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	/* Nothing to account for if the status does not actually change. */
	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/*
		 * It is invalid to suspend a device with an active child,
		 * unless it has been set to ignore its children.
		 */
		if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count)) {
			dev_err(dev, "runtime PM trying to suspend device but active child\n");
			error = -EBUSY;
			goto out;
		}

		/* One fewer unsuspended child; maybe the parent can idle now. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested class: dev->power.lock is already held. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			/* SUSPENDED -> ACTIVE: one more unsuspended child. */
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Idle notification must be sent without holding dev's lock. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1166
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		/*
		 * Drop the lock around cancel_work_sync(): pm_runtime_work()
		 * takes dev->power.lock itself, so waiting for it with the
		 * lock held would deadlock.
		 */
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check the condition under the lock before sleeping. */
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
1212
1213/**
1214 * pm_runtime_barrier - Flush pending requests and wait for completions.
1215 * @dev: Device to handle.
1216 *
1217 * Prevent the device from being suspended by incrementing its usage counter and
1218 * if there's a pending resume request for the device, wake the device up.
1219 * Next, make sure that all pending requests for the device have been flushed
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001220 * from pm_wq and wait for all runtime PM operations involving the device in
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001221 * progress to complete.
1222 *
1223 * Return value:
1224 * 1, if there was a resume request pending and the device had to be woken up,
1225 * 0, otherwise
1226 */
1227int pm_runtime_barrier(struct device *dev)
1228{
1229 int retval = 0;
1230
1231 pm_runtime_get_noresume(dev);
1232 spin_lock_irq(&dev->power.lock);
1233
1234 if (dev->power.request_pending
1235 && dev->power.request == RPM_REQ_RESUME) {
Alan Stern140a6c92010-09-25 23:35:07 +02001236 rpm_resume(dev, 0);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001237 retval = 1;
1238 }
1239
1240 __pm_runtime_barrier(dev);
1241
1242 spin_unlock_irq(&dev->power.lock);
1243 pm_runtime_put_noidle(dev);
1244
1245 return retval;
1246}
1247EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1248
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled: just deepen the nesting and return. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* The barrier is only needed on the 0 -> 1 transition. */
	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1297
1298/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001299 * pm_runtime_enable - Enable runtime PM of a device.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001300 * @dev: Device to handle.
1301 */
1302void pm_runtime_enable(struct device *dev)
1303{
1304 unsigned long flags;
1305
1306 spin_lock_irqsave(&dev->power.lock, flags);
1307
1308 if (dev->power.disable_depth > 0)
1309 dev->power.disable_depth--;
1310 else
1311 dev_warn(dev, "Unbalanced %s!\n", __func__);
1312
1313 spin_unlock_irqrestore(&dev->power.lock, flags);
1314}
1315EXPORT_SYMBOL_GPL(pm_runtime_enable);
1316
1317/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001318 * pm_runtime_forbid - Block runtime PM of a device.
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001319 * @dev: Device to handle.
1320 *
1321 * Increase the device's usage count and clear its power.runtime_auto flag,
1322 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1323 * for it.
1324 */
1325void pm_runtime_forbid(struct device *dev)
1326{
1327 spin_lock_irq(&dev->power.lock);
1328 if (!dev->power.runtime_auto)
1329 goto out;
1330
1331 dev->power.runtime_auto = false;
1332 atomic_inc(&dev->power.usage_count);
Alan Stern140a6c92010-09-25 23:35:07 +02001333 rpm_resume(dev, 0);
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001334
1335 out:
1336 spin_unlock_irq(&dev->power.lock);
1337}
1338EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1339
1340/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001341 * pm_runtime_allow - Unblock runtime PM of a device.
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001342 * @dev: Device to handle.
1343 *
1344 * Decrease the device's usage count and set its power.runtime_auto flag.
1345 */
1346void pm_runtime_allow(struct device *dev)
1347{
1348 spin_lock_irq(&dev->power.lock);
1349 if (dev->power.runtime_auto)
1350 goto out;
1351
1352 dev->power.runtime_auto = true;
1353 if (atomic_dec_and_test(&dev->power.usage_count))
Rafael J. Wysockife7450b2016-06-29 02:53:48 +02001354 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001355
1356 out:
1357 spin_unlock_irq(&dev->power.lock);
1358}
1359EXPORT_SYMBOL_GPL(pm_runtime_allow);
1360
1361/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001362 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
Alan Stern7490e442010-09-25 23:35:15 +02001363 * @dev: Device to handle.
1364 *
1365 * Set the power.no_callbacks flag, which tells the PM core that this
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001366 * device is power-managed through its parent and has no runtime PM
1367 * callbacks of its own. The runtime sysfs attributes will be removed.
Alan Stern7490e442010-09-25 23:35:15 +02001368 */
1369void pm_runtime_no_callbacks(struct device *dev)
1370{
1371 spin_lock_irq(&dev->power.lock);
1372 dev->power.no_callbacks = 1;
1373 spin_unlock_irq(&dev->power.lock);
1374 if (device_is_registered(dev))
1375 rpm_sysfs_remove(dev);
1376}
1377EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1378
1379/**
Alan Sternc7b61de2010-12-01 00:14:42 +01001380 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1381 * @dev: Device to handle
1382 *
1383 * Set the power.irq_safe flag, which tells the PM core that the
1384 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1385 * always be invoked with the spinlock held and interrupts disabled. It also
1386 * causes the parent's usage counter to be permanently incremented, preventing
1387 * the parent from runtime suspending -- otherwise an irq-safe child might have
1388 * to wait for a non-irq-safe parent.
1389 */
1390void pm_runtime_irq_safe(struct device *dev)
1391{
1392 if (dev->parent)
1393 pm_runtime_get_sync(dev->parent);
1394 spin_lock_irq(&dev->power.lock);
1395 dev->power.irq_safe = 1;
1396 spin_unlock_irq(&dev->power.lock);
1397}
1398EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1399
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * The "prevent" state is represented by an extra usage_count reference, which
 * must be taken and dropped exactly once per allowed -> prevented -> allowed
 * transition; hence the comparisons against the old settings below.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
1436
1437/**
1438 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1439 * @dev: Device to handle.
1440 * @delay: Value of the new delay in milliseconds.
1441 *
1442 * Set the device's power.autosuspend_delay value. If it changes to negative
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001443 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1444 * changes the other way, allow runtime suspends.
Alan Stern15bcb91d2010-09-25 23:35:21 +02001445 */
1446void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1447{
1448 int old_delay, old_use;
1449
1450 spin_lock_irq(&dev->power.lock);
1451 old_delay = dev->power.autosuspend_delay;
1452 old_use = dev->power.use_autosuspend;
1453 dev->power.autosuspend_delay = delay;
1454 update_autosuspend(dev, old_delay, old_use);
1455 spin_unlock_irq(&dev->power.lock);
1456}
1457EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1458
1459/**
1460 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1461 * @dev: Device to handle.
1462 * @use: New value for use_autosuspend.
1463 *
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001464 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
Alan Stern15bcb91d2010-09-25 23:35:21 +02001465 * suspends as needed.
1466 */
1467void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1468{
1469 int old_delay, old_use;
1470
1471 spin_lock_irq(&dev->power.lock);
1472 old_delay = dev->power.autosuspend_delay;
1473 old_use = dev->power.use_autosuspend;
1474 dev->power.use_autosuspend = use;
1475 update_autosuspend(dev, old_delay, old_use);
1476 spin_unlock_irq(&dev->power.lock);
1477}
1478EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1479
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 *
 * The device starts out RPM_SUSPENDED with runtime PM disabled
 * (disable_depth == 1); a driver or bus must call pm_runtime_enable()
 * to activate runtime PM for it.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	/* Runtime PM is disabled until pm_runtime_enable() is called. */
	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	/* timer_expires == 0 means the suspend timer is inactive. */
	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
1510
1511/**
Ulf Hansson5de85b92015-11-18 11:48:39 +01001512 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1513 * @dev: Device object to re-initialize.
1514 */
1515void pm_runtime_reinit(struct device *dev)
1516{
1517 if (!pm_runtime_enabled(dev)) {
1518 if (dev->power.runtime_status == RPM_ACTIVE)
1519 pm_runtime_set_suspended(dev);
1520 if (dev->power.irq_safe) {
1521 spin_lock_irq(&dev->power.lock);
1522 dev->power.irq_safe = 0;
1523 spin_unlock_irq(&dev->power.lock);
1524 if (dev->parent)
1525 pm_runtime_put(dev->parent);
1526 }
1527 }
1528}
1529
1530/**
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001531 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1532 * @dev: Device object being removed from device hierarchy.
1533 */
1534void pm_runtime_remove(struct device *dev)
1535{
1536 __pm_runtime_disable(dev, false);
Ulf Hansson5de85b92015-11-18 11:48:39 +01001537 pm_runtime_reinit(dev);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001538}
Ulf Hansson37f20412014-03-01 11:56:05 +01001539
1540/**
Rafael J. Wysocki21d5c572016-10-30 17:32:31 +01001541 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1542 * @dev: Device whose driver is going to be removed.
1543 *
1544 * Check links from this device to any consumers and if any of them have active
1545 * runtime PM references to the device, drop the usage counter of the device
1546 * (once per link).
1547 *
1548 * Links with the DL_FLAG_STATELESS flag set are ignored.
1549 *
1550 * Since the device is guaranteed to be runtime-active at the point this is
1551 * called, nothing else needs to be done here.
1552 *
1553 * Moreover, this is called after device_links_busy() has returned 'false', so
1554 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1555 * therefore rpm_active can't be manipulated concurrently.
1556 */
1557void pm_runtime_clean_up_links(struct device *dev)
1558{
1559 struct device_link *link;
1560 int idx;
1561
1562 idx = device_links_read_lock();
1563
1564 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1565 if (link->flags & DL_FLAG_STATELESS)
1566 continue;
1567
1568 if (link->rpm_active) {
1569 pm_runtime_put_noidle(dev);
1570 link->rpm_active = false;
1571 }
1572 }
1573
1574 device_links_read_unlock(idx);
1575}
1576
1577/**
1578 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1579 * @dev: Consumer device.
1580 */
1581void pm_runtime_get_suppliers(struct device *dev)
1582{
1583 struct device_link *link;
1584 int idx;
1585
1586 idx = device_links_read_lock();
1587
1588 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1589 if (link->flags & DL_FLAG_PM_RUNTIME)
1590 pm_runtime_get_sync(link->supplier);
1591
1592 device_links_read_unlock(idx);
1593}
1594
1595/**
1596 * pm_runtime_put_suppliers - Drop references to supplier devices.
1597 * @dev: Consumer device.
1598 */
1599void pm_runtime_put_suppliers(struct device *dev)
1600{
1601 struct device_link *link;
1602 int idx;
1603
1604 idx = device_links_read_lock();
1605
1606 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1607 if (link->flags & DL_FLAG_PM_RUNTIME)
1608 pm_runtime_put(link->supplier);
1609
1610 device_links_read_unlock(idx);
1611}
1612
Rafael J. Wysockibaa88092016-10-30 17:32:43 +01001613void pm_runtime_new_link(struct device *dev)
1614{
1615 spin_lock_irq(&dev->power.lock);
1616 dev->power.links_count++;
1617 spin_unlock_irq(&dev->power.lock);
1618}
1619
1620void pm_runtime_drop_link(struct device *dev)
1621{
1622 spin_lock_irq(&dev->power.lock);
1623 WARN_ON(dev->power.links_count == 0);
1624 dev->power.links_count--;
1625 spin_unlock_irq(&dev->power.lock);
1626}
1627
Rafael J. Wysocki21d5c572016-10-30 17:32:31 +01001628/**
Ulf Hansson37f20412014-03-01 11:56:05 +01001629 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1630 * @dev: Device to suspend.
1631 *
1632 * Disable runtime PM so we safely can check the device's runtime PM status and
1633 * if it is active, invoke it's .runtime_suspend callback to bring it into
1634 * suspend state. Keep runtime PM disabled to preserve the state unless we
1635 * encounter errors.
1636 *
1637 * Typically this function may be invoked from a system suspend callback to make
1638 * sure the device is put into low power state.
1639 */
1640int pm_runtime_force_suspend(struct device *dev)
1641{
1642 int (*callback)(struct device *);
1643 int ret = 0;
1644
1645 pm_runtime_disable(dev);
Ulf Hansson37f20412014-03-01 11:56:05 +01001646 if (pm_runtime_status_suspended(dev))
1647 return 0;
1648
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +02001649 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
Ulf Hansson37f20412014-03-01 11:56:05 +01001650
1651 if (!callback) {
1652 ret = -ENOSYS;
1653 goto err;
1654 }
1655
1656 ret = callback(dev);
1657 if (ret)
1658 goto err;
1659
Ulf Hansson1d9174f2016-10-13 16:58:54 +02001660 /*
1661 * Increase the runtime PM usage count for the device's parent, in case
1662 * when we find the device being used when system suspend was invoked.
1663 * This informs pm_runtime_force_resume() to resume the parent
1664 * immediately, which is needed to be able to resume its children,
1665 * when not deferring the resume to be managed via runtime PM.
1666 */
1667 if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
1668 pm_runtime_get_noresume(dev->parent);
1669
Ulf Hansson37f20412014-03-01 11:56:05 +01001670 pm_runtime_set_suspended(dev);
1671 return 0;
1672err:
1673 pm_runtime_enable(dev);
1674 return ret;
1675}
1676EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1677
1678/**
Ulf Hansson1d9174f2016-10-13 16:58:54 +02001679 * pm_runtime_force_resume - Force a device into resume state if needed.
Ulf Hansson37f20412014-03-01 11:56:05 +01001680 * @dev: Device to resume.
1681 *
1682 * Prior invoking this function we expect the user to have brought the device
1683 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
Ulf Hansson1d9174f2016-10-13 16:58:54 +02001684 * those actions and brings the device into full power, if it is expected to be
1685 * used on system resume. To distinguish that, we check whether the runtime PM
1686 * usage count is greater than 1 (the PM core increases the usage count in the
1687 * system PM prepare phase), as that indicates a real user (such as a subsystem,
1688 * driver, userspace, etc.) is using it. If that is the case, the device is
1689 * expected to be used on system resume as well, so then we resume it. In the
1690 * other case, we defer the resume to be managed via runtime PM.
Ulf Hansson37f20412014-03-01 11:56:05 +01001691 *
Ulf Hansson1d9174f2016-10-13 16:58:54 +02001692 * Typically this function may be invoked from a system resume callback.
Ulf Hansson37f20412014-03-01 11:56:05 +01001693 */
1694int pm_runtime_force_resume(struct device *dev)
1695{
1696 int (*callback)(struct device *);
1697 int ret = 0;
1698
Andrzej Hajdadbcd2d72014-10-17 12:58:02 +02001699 callback = RPM_GET_CALLBACK(dev, runtime_resume);
Ulf Hansson37f20412014-03-01 11:56:05 +01001700
1701 if (!callback) {
1702 ret = -ENOSYS;
1703 goto out;
1704 }
1705
Ulf Hansson9f5b5272016-05-30 11:33:12 +02001706 if (!pm_runtime_status_suspended(dev))
1707 goto out;
1708
Ulf Hansson1d9174f2016-10-13 16:58:54 +02001709 /*
1710 * Decrease the parent's runtime PM usage count, if we increased it
1711 * during system suspend in pm_runtime_force_suspend().
1712 */
1713 if (atomic_read(&dev->power.usage_count) > 1) {
1714 if (dev->parent)
1715 pm_runtime_put_noidle(dev->parent);
1716 } else {
1717 goto out;
1718 }
1719
Ulf Hansson0ae3aee2016-04-08 13:10:23 +02001720 ret = pm_runtime_set_active(dev);
Ulf Hansson37f20412014-03-01 11:56:05 +01001721 if (ret)
1722 goto out;
1723
Ulf Hansson0ae3aee2016-04-08 13:10:23 +02001724 ret = callback(dev);
1725 if (ret) {
1726 pm_runtime_set_suspended(dev);
1727 goto out;
1728 }
1729
Ulf Hansson37f20412014-03-01 11:56:05 +01001730 pm_runtime_mark_last_busy(dev);
1731out:
1732 pm_runtime_enable(dev);
1733 return ret;
1734}
1735EXPORT_SYMBOL_GPL(pm_runtime_force_resume);