clockevents: make device shutdown robust
The device shutdown does not clean up the next_event variable of the
clock event device. So when the device is reactivated, the possibly
stale next_event value can prevent the device from being reprogrammed,
as it claims to wait on an event already.
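
To illustrate the failure mode: a reprogramming path typically skips
the hardware write when the device already appears to be armed for the
requested expiry or earlier, so a stale next_event makes every later
request look redundant. A minimal user-space sketch with simplified,
hypothetical types (not the kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define KTIME_MAX INT64_MAX

	/* Simplified stand-in for struct clock_event_device. */
	struct fake_clock_event_device {
		int64_t next_event;	/* absolute expiry the device is armed for */
	};

	/*
	 * Hypothetical reprogram helper: skip the hardware write if the
	 * device already claims to expire at or before the new request.
	 */
	static int fake_program_event(struct fake_clock_event_device *dev,
				      int64_t expires)
	{
		if (expires >= dev->next_event)
			return 0;	/* looks redundant, nothing happens */
		dev->next_event = expires;
		return 1;		/* hardware reprogrammed */
	}

	int main(void)
	{
		struct fake_clock_event_device dev = { .next_event = 1000 };

		/* Shutdown without clearing next_event: stale value survives. */
		printf("%d\n", fake_program_event(&dev, 2000)); /* 0: skipped */

		/* With the fix, shutdown resets next_event to KTIME_MAX ... */
		dev.next_event = KTIME_MAX;
		printf("%d\n", fake_program_event(&dev, 2000)); /* 1: programmed */
		return 0;
	}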
This is the root cause of the resurfacing suspend/resume problem,
where systems need a key press to come back to life.
Fix this by setting next_event to KTIME_MAX when the device is shut
down. Use a separate function for shutdown which takes care of that,
and keep the direct set mode call only in the broadcast code, where we
cannot touch the next_event value.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1876b52..f8d9680 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,6 +72,16 @@
}
/**
+ * clockevents_shutdown - shutdown the device and clear next_event
+ * @dev: device to shutdown
+ */
+void clockevents_shutdown(struct clock_event_device *dev)
+{
+ clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+ dev->next_event.tv64 = KTIME_MAX;
+}
+
+/**
* clockevents_program_event - Reprogram the clock event device.
* @expires: absolute expiry time (monotonic clock)
*
@@ -206,7 +216,7 @@
if (new) {
BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
- clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(new);
}
local_irq_restore(flags);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2f5a382..f1f3eee 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -236,8 +236,7 @@
if (!cpu_isset(cpu, tick_broadcast_mask)) {
cpu_set(cpu, tick_broadcast_mask);
if (td->mode == TICKDEV_MODE_PERIODIC)
- clockevents_set_mode(dev,
- CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(dev);
}
if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
tick_broadcast_force = 1;
@@ -254,7 +253,7 @@
if (cpus_empty(tick_broadcast_mask)) {
if (!bc_stopped)
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(bc);
} else if (bc_stopped) {
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
tick_broadcast_start_periodic(bc);
@@ -306,7 +305,7 @@
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
if (bc && cpus_empty(tick_broadcast_mask))
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(bc);
}
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -321,7 +320,7 @@
bc = tick_broadcast_device.evtdev;
if (bc)
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(bc);
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index c477719..019315e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -249,7 +249,7 @@
* not give it back to the clockevents layer !
*/
if (tick_is_broadcast_device(curdev)) {
- clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(curdev);
curdev = NULL;
}
clockevents_exchange_device(curdev, newdev);
@@ -311,7 +311,7 @@
unsigned long flags;
spin_lock_irqsave(&tick_device_lock, flags);
- clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(td->evtdev);
spin_unlock_irqrestore(&tick_device_lock, flags);
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 0ffc291..6e9db97 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -10,6 +10,8 @@
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
+extern void clockevents_shutdown(struct clock_event_device *dev);
+
/*
* NO_HZ / high resolution timer shared code
*/