/*
 * Read-Copy Update mechanism for mutual exclusion, realtime implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *		With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
 *		for pushing me away from locks and towards counters, and
 *		to Suparna Bhattacharya for pushing me completely away
 *		from atomic instructions on the read side.
 *
 *  - Added handling of Dynamic Ticks
 *      Copyright 2007 - Paul E. McKenney <paulmck@us.ibm.com>
 *                     - Steven Rostedt <srostedt@redhat.com>
 *
 * Papers:  http://www.rdrop.com/users/paulmck/RCU
 *
 * Design Document: http://lwn.net/Articles/253651/
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/rcupreempt_trace.h>
#include <asm/byteorder.h>

/*
 * PREEMPT_RCU data structures.
 */

/*
 * GP_STAGES specifies the number of times the state machine has
 * to go through all the rcu_try_flip_states (see below)
 * in a single Grace Period.
 *
 * GP in GP_STAGES stands for Grace Period ;)
 */
#define GP_STAGES 2
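
/*
 * Per-CPU state for preemptible RCU: the lock protecting this CPU's
 * callback queues, the number of the last grace period this CPU has
 * caught up with, the callback lists at their various stages of
 * waiting, and this CPU's pair of read-side counters.
 */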
struct rcu_data {
	spinlock_t	lock;		/* Protect rcu_data fields. */
	long		completed;	/* Number of last completed batch. */
	int		waitlistcount;	/* Number of non-empty wait lists. */
	struct rcu_head *nextlist;	/* New callbacks, no GP assigned yet. */
	struct rcu_head **nexttail;
	struct rcu_head *waitlist[GP_STAGES]; /* Callbacks waiting for a GP. */
	struct rcu_head **waittail[GP_STAGES];
	struct rcu_head *donelist;	/* from waitlist & waitschedlist */
	struct rcu_head **donetail;
	long		rcu_flipctr[2];	/* This CPU's read-side counter pair. */
	struct rcu_head *nextschedlist;	/* New call_rcu_sched() callbacks. */
	struct rcu_head **nextschedtail;
	struct rcu_head *waitschedlist;	/* call_rcu_sched() callbacks waiting */
	struct rcu_head **waitschedtail; /*  for an rcu_sched grace period. */
	int		rcu_sched_sleeping; /* rcu_sched kthread may be asleep. */
#ifdef CONFIG_RCU_TRACE
	struct rcupreempt_trace trace;
#endif /* #ifdef CONFIG_RCU_TRACE */
};

/*
 * States for rcu_try_flip() and friends.
 */

enum rcu_try_flip_states {

	/*
	 * Stay here if nothing is happening. Flip the counter if something
	 * starts happening. Denoted by "I"
	 */
	rcu_try_flip_idle_state,

	/*
	 * Wait here for all CPUs to notice that the counter has flipped. This
	 * prevents the old set of counters from ever being incremented once
	 * we leave this state, which in turn is necessary because we cannot
	 * test any individual counter for zero -- we can only check the sum.
	 * Denoted by "A".
	 */
	rcu_try_flip_waitack_state,

	/*
	 * Wait here for the sum of the old per-CPU counters to reach zero.
	 * Denoted by "Z".
	 */
	rcu_try_flip_waitzero_state,

	/*
	 * Wait here for each of the other CPUs to execute a memory barrier.
	 * This is necessary to ensure that these other CPUs really have
	 * completed executing their RCU read-side critical sections, despite
	 * their CPUs wildly reordering memory. Denoted by "M".
	 */
	rcu_try_flip_waitmb_state,
};

/*
 * States for rcu_ctrlblk.rcu_sched_sleep.
 */

enum rcu_sched_sleep_states {
	rcu_sched_not_sleeping,	/* Not sleeping, callbacks need GP.  */
	rcu_sched_sleep_prep,	/* Thinking of sleeping, rechecking. */
	rcu_sched_sleeping,	/* Sleeping, awaken if GP needed. */
};

struct rcu_ctrlblk {
	spinlock_t	fliplock;	/* Protect state-machine transitions. */
	long		completed;	/* Number of last completed batch. */
	enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
							the rcu state machine */
	spinlock_t	schedlock;	/* Protect rcu_sched sleep state. */
	enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */
	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
};

struct rcu_dyntick_sched {
	int dynticks;		/* Odd while CPU is active, even in dynticks-idle. */
	int dynticks_snap;	/* Snapshot used by the counter-flip state machine. */
	int sched_qs;		/* Count of scheduler quiescent states. */
	int sched_qs_snap;	/* Snapshot used by the rcu_sched grace period. */
	int sched_dynticks_snap; /* dynticks snapshot for rcu_sched. */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
	.dynticks = 1,
};

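/*
 * Note a scheduler quiescent state (for example, a context switch) on the
 * specified CPU by bumping its sched_qs counter.
 */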
void rcu_qsctr_inc(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_qs++;
}

#ifdef CONFIG_NO_HZ

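/*
 * Mark this CPU as entering dynticks-idle mode.  The increment leaves
 * rcu_dyntick_sched.dynticks even, telling the grace-period machinery
 * that this CPU is not in an RCU read-side critical section.
 */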
void rcu_enter_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
}

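/*
 * Mark this CPU as leaving dynticks-idle mode.  The increment leaves
 * rcu_dyntick_sched.dynticks odd, so subsequent RCU read-side critical
 * sections on this CPU are again tracked.
 */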
void rcu_exit_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
				&rs);
}

#endif /* CONFIG_NO_HZ */


static DEFINE_PER_CPU(struct rcu_data, rcu_data);

static struct rcu_ctrlblk rcu_ctrlblk = {
	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
	.completed = 0,
	.rcu_try_flip_state = rcu_try_flip_idle_state,
	.schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
	.sched_sleep = rcu_sched_not_sleeping,
	.sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
};

static struct task_struct *rcu_sched_grace_period_task;

#ifdef CONFIG_RCU_TRACE
static char *rcu_try_flip_state_names[] =
	{ "idle", "waitack", "waitzero", "waitmb" };
#endif /* #ifdef CONFIG_RCU_TRACE */

static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
	= CPU_BITS_NONE;

/*
 * Enum and per-CPU flag to determine when each CPU has seen
 * the most recent counter flip.
 */

enum rcu_flip_flag_values {
	rcu_flip_seen,		/* Steady/initial state, last flip seen. */
				/* Only GP detector can update. */
	rcu_flipped		/* Flip just completed, need confirmation. */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
								= rcu_flip_seen;

/*
 * Enum and per-CPU flag to determine when each CPU has executed the
 * needed memory barrier to fence in memory references from its last RCU
 * read-side critical section in the just-completed grace period.
 */

enum rcu_mb_flag_values {
	rcu_mb_done,		/* Steady/initial state, no mb()s required. */
				/* Only GP detector can update. */
	rcu_mb_needed		/* Flip just completed, need an mb(). */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
								= rcu_mb_done;

/*
 * RCU_DATA_ME: find the current CPU's rcu_data structure.
 * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
 */
#define RCU_DATA_ME()		(&__get_cpu_var(rcu_data))
#define RCU_DATA_CPU(cpu)	(&per_cpu(rcu_data, cpu))

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable, but where the CPU number is so cached.
 */
#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable.
 */
#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is pointed
 * to by a local variable.
 */
#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));

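/*
 * How long the rcu_sched grace-period kthread sleeps between scans,
 * in jiffies (1/50 of a second).
 */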
#define RCU_SCHED_BATCH_TIME (HZ / 50)

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

void __rcu_read_lock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting != 0) {

		/* An earlier rcu_read_lock() covers us, just count it. */

		t->rcu_read_lock_nesting = nesting + 1;

	} else {
		unsigned long flags;

		/*
		 * We disable interrupts for the following reasons:
		 * - If we get scheduling clock interrupt here, and we
		 *   end up acking the counter flip, it's like a promise
		 *   that we will never increment the old counter again.
		 *   Thus we will break that promise if that
		 *   scheduling clock interrupt happens between the time
		 *   we pick the .completed field and the time that we
		 *   increment our counter.
		 *
		 * - We don't want to be preempted out here.
		 *
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_lock(), so increment
		 * the current counter for the current CPU.  Use volatile
		 * casts to prevent the compiler from reordering.
		 */

		idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;

		/*
		 * Now that the per-CPU counter has been incremented, we
		 * are protected from races with rcu_read_lock() invoked
		 * from NMI handlers on this CPU.  We can therefore safely
		 * increment the nesting counter, relieving further NMIs
		 * of the need to increment the per-CPU counter.
		 */

		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;

		/*
		 * Now that we have prevented any NMIs from storing
		 * to the ->rcu_flipctr_idx, we can safely use it to
		 * remember which counter to decrement in the matching
		 * rcu_read_unlock().
		 */

		ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

void __rcu_read_unlock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting > 1) {

		/*
		 * We are still protected by the enclosing rcu_read_lock(),
		 * so simply decrement the counter.
		 */

		t->rcu_read_lock_nesting = nesting - 1;

	} else {
		unsigned long flags;

		/*
		 * Disable local interrupts to prevent the grace-period
		 * detection state machine from seeing us half-done.
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock() and rcu_read_unlock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_unlock(), so we must
		 * decrement the current counter for the current CPU.
		 * This must be done carefully, because NMIs can
		 * occur at any point in this code, and any rcu_read_lock()
		 * and rcu_read_unlock() pairs in the NMI handlers
		 * must interact non-destructively with this code.
		 * Lots of volatile casts, and -very- careful ordering.
		 *
		 * Changes to this code, including this one, must be
		 * inspected, validated, and tested extremely carefully!!!
		 */

		/*
		 * First, pick up the index.
		 */

		idx = ACCESS_ONCE(t->rcu_flipctr_idx);

		/*
		 * Now that we have fetched the counter index, it is
		 * safe to decrement the per-task RCU nesting counter.
		 * After this, any interrupts or NMIs will increment and
		 * decrement the per-CPU counters.
		 */
		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;

		/*
		 * It is now safe to decrement the per-CPU counter.
		 * NMIs that occur after this statement will route their
		 * rcu_read_lock() calls through this "else" clause, and
		 * will thus start incrementing the per-CPU counter on
		 * their own.  They will also clobber ->rcu_flipctr_idx,
		 * but that is OK, since we have already fetched it.
		 */

		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
 * If a global counter flip has occurred since the last time that we
 * advanced callbacks, advance them.  Hardware interrupts must be
 * disabled when calling this function.
 */
static void __rcu_advance_callbacks(struct rcu_data *rdp)
{
	int cpu;
	int i;
	int wlc = 0;

	if (rdp->completed != rcu_ctrlblk.completed) {
		if (rdp->waitlist[GP_STAGES - 1] != NULL) {
			*rdp->donetail = rdp->waitlist[GP_STAGES - 1];
			rdp->donetail = rdp->waittail[GP_STAGES - 1];
			RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
		}
		for (i = GP_STAGES - 2; i >= 0; i--) {
			if (rdp->waitlist[i] != NULL) {
				rdp->waitlist[i + 1] = rdp->waitlist[i];
				rdp->waittail[i + 1] = rdp->waittail[i];
				wlc++;
			} else {
				rdp->waitlist[i + 1] = NULL;
				rdp->waittail[i + 1] =
					&rdp->waitlist[i + 1];
			}
		}
		if (rdp->nextlist != NULL) {
			rdp->waitlist[0] = rdp->nextlist;
			rdp->waittail[0] = rdp->nexttail;
			wlc++;
			rdp->nextlist = NULL;
			rdp->nexttail = &rdp->nextlist;
			RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
		} else {
			rdp->waitlist[0] = NULL;
			rdp->waittail[0] = &rdp->waitlist[0];
		}
		rdp->waitlistcount = wlc;
		rdp->completed = rcu_ctrlblk.completed;
	}

	/*
	 * Check to see if this CPU needs to report that it has seen
	 * the most recent counter flip, thereby declaring that all
	 * subsequent rcu_read_lock() invocations will respect this flip.
	 */

	cpu = raw_smp_processor_id();
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();	/* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();	/* Subsequent RCU read-side critical sections */
				/*  seen -after- acknowledgement. */
	}
}

#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);

/**
 * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rcu_dyntick_sched.dynticks to let the RCU handling know that the
 * CPU is active.
 */
void rcu_irq_enter(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	if (per_cpu(rcu_update_flag, cpu))
		per_cpu(rcu_update_flag, cpu)++;

	/*
	 * Only update if we are coming from a stopped ticks mode
	 * (rcu_dyntick_sched.dynticks is even).
	 */
	if (!in_interrupt() &&
	    (rdssp->dynticks & 0x1) == 0) {
		/*
		 * The following might seem like we could have a race
		 * with NMI/SMIs. But this really isn't a problem.
		 * Here we do a read/modify/write, and the race happens
		 * when an NMI/SMI comes in after the read and before
		 * the write. But NMI/SMIs will increment this counter
		 * twice before returning, so the zero bit will not
		 * be corrupted by the NMI/SMI which is the most important
		 * part.
		 *
		 * The only thing is that we would bring back the counter
		 * to a position that it was in during the NMI/SMI.
		 * But the zero bit would be set, so the rest of the
		 * counter would again be ignored.
		 *
		 * On return from the IRQ, the counter's low-order bit may
		 * be 0 and its value may be the same as it was at the
		 * return from the NMI/SMI. If the state machine was so
		 * unlucky to see that, it still doesn't matter, since all
		 * RCU read-side critical sections on this CPU would
		 * have already completed.
		 */
		rdssp->dynticks++;
		/*
		 * The following memory barrier ensures that any
		 * rcu_read_lock() primitives in the irq handler
		 * are seen by other CPUs to follow the above
		 * increment to rcu_dyntick_sched.dynticks. This is
		 * required in order for other CPUs to correctly
		 * determine when it is safe to advance the RCU
		 * grace-period state machine.
		 */
		smp_mb(); /* see above block comment. */
		/*
		 * Since we can't determine the dynamic tick mode from
		 * the rcu_dyntick_sched.dynticks after this routine,
		 * we use a second flag to acknowledge that we came
		 * from an idle state with ticks stopped.
		 */
		per_cpu(rcu_update_flag, cpu)++;
		/*
		 * If we take an NMI/SMI now, they will also increment
		 * the rcu_update_flag, and will not update the
		 * rcu_dyntick_sched.dynticks on exit. That is for
		 * this IRQ to do.
		 */
	}
}


/**
 * rcu_irq_exit - Called from exiting Hard irq context.
 *
 * If the CPU was idle with dynamic ticks active, update the
 * rcu_dyntick_sched.dynticks to let the RCU handling be
 * aware that the CPU is going back to idle with no ticks.
 */
void rcu_irq_exit(void)
{
	int cpu = smp_processor_id();
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * rcu_update_flag is set if we interrupted the CPU
	 * when it was idle with ticks stopped.
	 * Once this occurs, we keep track of interrupt nesting
	 * because a NMI/SMI could also come in, and we still
	 * only want the IRQ that started the increment of the
	 * rcu_dyntick_sched.dynticks to be the one that modifies
	 * it on exit.
	 */
	if (per_cpu(rcu_update_flag, cpu)) {
		if (--per_cpu(rcu_update_flag, cpu))
			return;

		/* This must match the interrupt nesting */
		WARN_ON(in_interrupt());

		/*
		 * If an NMI/SMI happens now we are still
		 * protected by the rcu_dyntick_sched.dynticks being odd.
		 */

		/*
		 * The following memory barrier ensures that any
		 * rcu_read_unlock() primitives in the irq handler
		 * are seen by other CPUs to precede the following
		 * increment to rcu_dyntick_sched.dynticks. This
		 * is required in order for other CPUs to determine
		 * when it is safe to advance the RCU grace-period
		 * state machine.
		 */
		smp_mb(); /* see above block comment. */
		rdssp->dynticks++;
		WARN_ON(rdssp->dynticks & 0x1);
	}
}

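/*
 * NMI entry and exit are accounted for in the same way as hard irq
 * entry and exit: simply reuse the irq handlers above.
 */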
void rcu_nmi_enter(void)
{
	rcu_irq_enter();
}

void rcu_nmi_exit(void)
{
	rcu_irq_exit();
}

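/*
 * Snapshot this CPU's dynticks counter so that the counter-flip state
 * machine can later tell whether the CPU stayed in dynticks-idle mode.
 */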
static void dyntick_save_progress_counter(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->dynticks_snap = rdssp->dynticks;
}

static inline int
rcu_try_flip_waitack_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot be in the middle of an rcu_read_lock(), so
	 * the next rcu_read_lock() it executes must use the new value
	 * of the counter.  So we can safely pretend that this CPU
	 * already acknowledged the counter.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq handlers, then, as above, we can safely pretend
	 * that this CPU already acknowledged the counter.
	 */

	if ((curr - snap) > 2 || (curr & 0x1) == 0)
		return 0;

	/* We need this CPU to explicitly acknowledge the counter flip. */

	return 1;
}

static inline int
rcu_try_flip_waitmb_needed(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot have executed an RCU read-side critical section
	 * during that time, so there is no need for it to execute a
	 * memory barrier.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU either entered or exited an outermost interrupt,
	 * SMI, NMI, or whatever handler, then we know that it executed
	 * a memory barrier when doing so.  So we don't need another one.
	 */
	if (curr != snap)
		return 0;

	/* We need the CPU to execute a memory barrier. */

	return 1;
}

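/*
 * Snapshot this CPU's dynticks counter on behalf of the rcu_sched
 * grace-period machinery.
 */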
static void dyntick_save_progress_counter_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_dynticks_snap = rdssp->dynticks;
}

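/*
 * Based on its dynticks activity, has this CPU already passed through a
 * quiescent state since the last rcu_sched snapshot?  Return 0 if so,
 * in which case the rcu_sched grace period need not wait on it.
 */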
static int rcu_qsctr_inc_needed_dyntick(int cpu)
{
	long curr;
	long snap;
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	curr = rdssp->dynticks;
	snap = rdssp->sched_dynticks_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU remained in dynticks mode for the entire time
	 * and didn't take any interrupts, NMIs, SMIs, or whatever,
	 * then it cannot be in the middle of an rcu_read_lock(), so
	 * the next rcu_read_lock() it executes must use the new value
	 * of the counter.  Therefore, this CPU has been in a quiescent
	 * state the entire time, and we don't need to wait for it.
	 */

	if ((curr == snap) && ((curr & 0x1) == 0))
		return 0;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq handlers, then, as above, this CPU has already
	 * passed through a quiescent state.
	 */

	if ((curr - snap) > 2 || (snap & 0x1) == 0)
		return 0;

	/* We need this CPU to go through a quiescent state. */

	return 1;
}

#else /* !CONFIG_NO_HZ */

# define dyntick_save_progress_counter(cpu)		do { } while (0)
# define rcu_try_flip_waitack_needed(cpu)		(1)
# define rcu_try_flip_waitmb_needed(cpu)		(1)

# define dyntick_save_progress_counter_sched(cpu)	do { } while (0)
# define rcu_qsctr_inc_needed_dyntick(cpu)		(1)

#endif /* CONFIG_NO_HZ */

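/*
 * Snapshot this CPU's count of scheduler quiescent states so that the
 * rcu_sched grace-period kthread can later see whether another one
 * has occurred.
 */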
static void save_qsctr_sched(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_qs_snap = rdssp->sched_qs;
}

static inline int rcu_qsctr_inc_needed(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	/*
	 * If there has been a quiescent state, no more need to wait
	 * on this CPU.
	 */

	if (rdssp->sched_qs != rdssp->sched_qs_snap) {
		smp_mb(); /* force ordering with cpu entering schedule(). */
		return 0;
	}

	/* We need this CPU to go through a quiescent state. */

	return 1;
}

/*
 * Get here when RCU is idle.  Decide whether we need to
 * move out of idle state, and return non-zero if so.
 * "Straightforward" approach for the moment, might later
 * use callback-list lengths, grace-period duration, or
 * some such to determine when to exit idle state.
 * Might also need a pre-idle test that does not acquire
 * the lock, but let's get the simple case working first...
 */

static int
rcu_try_flip_idle(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
	if (!rcu_pending(smp_processor_id())) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
		return 0;
	}

	/*
	 * Do the flip.
	 */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
	rcu_ctrlblk.completed++;  /* stands in for rcu_try_flip_g2 */

	/*
	 * Need a memory barrier so that other CPUs see the new
	 * counter value before they see the subsequent change of all
	 * the rcu_flip_flag instances to rcu_flipped.
	 */

	smp_mb();	/* see above block comment. */

	/* Now ask each CPU for acknowledgement of the flip. */

	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
		dyntick_save_progress_counter(cpu);
	}

	return 1;
}

/*
 * Wait for CPUs to acknowledge the flip.
 */

static int
rcu_try_flip_waitack(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
		if (rcu_try_flip_waitack_needed(cpu) &&
		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
			return 0;
		}

	/*
	 * Make sure our checks above don't bleed into subsequent
	 * waiting for the sum of the counters to reach zero.
	 */

	smp_mb();	/* see above block comment. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
	return 1;
}

/*
 * Wait for collective ``last'' counter to reach zero,
 * then tell all CPUs to do an end-of-grace-period memory barrier.
 */

static int
rcu_try_flip_waitzero(void)
{
	int cpu;
	int lastidx = !(rcu_ctrlblk.completed & 0x1);
	int sum = 0;

	/* Check to see if the sum of the "last" counters is zero. */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
	if (sum != 0) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
		return 0;
	}

	/*
	 * This ensures that the other CPUs see the call for
	 * memory barriers -after- the sum to zero has been
	 * detected here
	 */
	smp_mb();  /*  ^^^^^^^^^^^^ */

	/* Call for a memory barrier from each CPU. */
	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))) {
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
		dyntick_save_progress_counter(cpu);
	}

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
	return 1;
}

/*
 * Wait for all CPUs to do their end-of-grace-period memory barrier.
 * Return 1 once all CPUs have done so.
 */

static int
rcu_try_flip_waitmb(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
		if (rcu_try_flip_waitmb_needed(cpu) &&
		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
			return 0;
		}

	smp_mb(); /* Ensure that the above checks precede any following flip. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
	return 1;
}

/*
 * Attempt a single flip of the counters.  Remember, a single flip does
 * -not- constitute a grace period.  Instead, the interval between
 * at least GP_STAGES consecutive flips is a grace period.
 *
 * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
 * on a large SMP, they might want to use a hierarchical organization of
 * the per-CPU-counter pairs.
 */
static void rcu_try_flip(void)
{
	unsigned long flags;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
	if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
		return;
	}

	/*
	 * Take the next transition(s) through the RCU grace-period
	 * flip-counter state machine.
	 */

	switch (rcu_ctrlblk.rcu_try_flip_state) {
	case rcu_try_flip_idle_state:
		if (rcu_try_flip_idle())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitack_state;
		break;
	case rcu_try_flip_waitack_state:
		if (rcu_try_flip_waitack())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitzero_state;
		break;
	case rcu_try_flip_waitzero_state:
		if (rcu_try_flip_waitzero())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitmb_state;
		break;
	case rcu_try_flip_waitmb_state:
		if (rcu_try_flip_waitmb())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_idle_state;
	}
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}

/*
 * Check to see if this CPU needs to do a memory barrier in order to
 * ensure that any prior RCU read-side critical sections have committed
 * their counter manipulations and critical-section memory references
 * before declaring the grace period to be completed.
 */
static void rcu_check_mb(int cpu)
{
	if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
		smp_mb(); /* Ensure RCU read-side accesses are visible. */
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
	}
}

void rcu_check_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/*
	 * If this CPU took its interrupt from user mode or from the
	 * idle loop, and this is not a nested interrupt, then
	 * this CPU has to have exited all prior preempt-disable
	 * sections of code.  So increment the counter to note this.
	 *
	 * The memory barrier is needed to handle the case where
	 * writes from a preempt-disable section of code get reordered
	 * into schedule() by this CPU's write buffer.  So the memory
	 * barrier makes sure that the rcu_qsctr_inc() is seen by other
	 * CPUs to happen after any such write.
	 */

	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		smp_mb();	/* Guard against aggressive schedule(). */
		rcu_qsctr_inc(cpu);
	}

	rcu_check_mb(cpu);
	if (rcu_ctrlblk.completed == rdp->completed)
		rcu_try_flip();
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	if (rdp->donelist == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
	} else {
		spin_unlock_irqrestore(&rdp->lock, flags);
		raise_softirq(RCU_SOFTIRQ);
	}
}

/*
 * Needed by dynticks, to make sure all RCU processing has finished
 * when we go idle:
 */
void rcu_advance_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	if (rcu_ctrlblk.completed == rdp->completed) {
		rcu_try_flip();
		if (rcu_ctrlblk.completed == rdp->completed)
			return;
	}
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
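/*
 * Splice the callbacks on srclist onto the end of the list tagged by
 * dsttail, then reinitialize srclist/srctail to an empty list.
 */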
#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
		*dsttail = srclist; \
		if (srclist != NULL) { \
			dsttail = srctail; \
			srclist = NULL; \
			srctail = &srclist;\
		} \
	} while (0)

void rcu_offline_cpu(int cpu)
{
	int i;
	struct rcu_head *list = NULL;
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
	struct rcu_head *schedlist = NULL;
	struct rcu_head **schedtail = &schedlist;
	struct rcu_head **tail = &list;

	/*
	 * Remove all callbacks from the newly dead CPU, retaining order.
	 * Otherwise rcu_barrier() will fail.
	 */

	spin_lock_irqsave(&rdp->lock, flags);
	rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
	for (i = GP_STAGES - 1; i >= 0; i--)
		rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
					list, tail);
	rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
	rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
				schedlist, schedtail);
	rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
				schedlist, schedtail);
	rdp->rcu_sched_sleeping = 0;
	spin_unlock_irqrestore(&rdp->lock, flags);
	rdp->waitlistcount = 0;

	/* Disengage the newly dead CPU from the grace-period computation. */

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	rcu_check_mb(cpu);
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /*  seen -after- acknowledgement. */
	}

	RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
	RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];

	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;

	cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));

	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * Place the removed callbacks on the current CPU's queue.
	 * Make them all start a new grace period: simple approach,
	 * in theory could starve a given set of callbacks, but
	 * you would need to be doing some serious CPU hotplugging
	 * to make this happen.  If this becomes a problem, adding
	 * a synchronize_rcu() to the hotplug path would be a simple
	 * fix.
	 */

	local_irq_save(flags);  /* disable preempt till we know what lock. */
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nexttail = list;
	if (list)
		rdp->nexttail = tail;
	*rdp->nextschedtail = schedlist;
	if (schedlist)
		rdp->nextschedtail = schedtail;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

void __cpuinit rcu_online_cpu(int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;

	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);

	/*
	 * The rcu_sched grace-period processing might have bypassed
	 * this CPU, given that it was not in the rcu_cpu_online_map
	 * when the grace-period scan started.  This means that the
	 * grace-period task might sleep.  So make sure that if this
	 * should happen, the first callback posted to this CPU will
	 * wake up the grace-period task if need be.
	 */

	rdp = RCU_DATA_CPU(cpu);
	spin_lock_irqsave(&rdp->lock, flags);
	rdp->rcu_sched_sleeping = 1;
	spin_unlock_irqrestore(&rdp->lock, flags);
}

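/*
 * RCU_SOFTIRQ handler: detach this CPU's donelist and invoke each of
 * the callbacks on it.
 */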
static void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	struct rcu_data *rdp;

	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	list = rdp->donelist;
	if (list == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
		return;
	}
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
	RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
	while (list) {
		next = list->next;
		list->func(list);
		list = next;
		RCU_TRACE_ME(rcupreempt_trace_invoke);
	}
}

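/*
 * Queue an RCU callback to be invoked after all pre-existing RCU
 * read-side critical sections have completed.
 */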
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	__rcu_advance_callbacks(rdp);
	*rdp->nexttail = head;
	rdp->nexttail = &head->next;
	RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

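/*
 * Queue a callback to be invoked after all pre-existing preempt-disable
 * and irq-disable regions of code have completed, waking the rcu_sched
 * grace-period kthread if it is sleeping.
 */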
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;
	int wake_gp = 0;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	*rdp->nextschedtail = head;
	rdp->nextschedtail = &head->next;
	if (rdp->rcu_sched_sleeping) {

		/* Grace-period processing might be sleeping... */

		rdp->rcu_sched_sleeping = 0;
		wake_gp = 1;
	}
	spin_unlock_irqrestore(&rdp->lock, flags);
	if (wake_gp) {

		/* Wake up grace-period processing, unless someone beat us. */

		spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
		if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
			wake_gp = 0;
		rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
		spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
		if (wake_gp)
			wake_up_interruptible(&rcu_ctrlblk.sched_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Wait until all currently running preempt_disable() code segments
 * (including hardware-irq-disable segments) complete.  Note that
 * in -rt this does -not- necessarily result in all currently executing
 * interrupt -handlers- having completed.
 */
void __synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	if (num_online_cpus() == 1)
		return;  /* blocking is gp if only one CPU! */

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(__synchronize_sched);

/*
 * kthread function that manages call_rcu_sched grace periods.
 */
| 1238 | static int rcu_sched_grace_period(void *arg) |
| 1239 | { |
| 1240 | int couldsleep; /* might sleep after current pass. */ |
| 1241 | int couldsleepnext = 0; /* might sleep after next pass. */ |
| 1242 | int cpu; |
| 1243 | unsigned long flags; |
| 1244 | struct rcu_data *rdp; |
| 1245 | int ret; |
| 1246 | |
| 1247 | /* |
| 1248 | * Each pass through the following loop handles one |
| 1249 | * rcu_sched grace period cycle. |
| 1250 | */ |
| 1251 | do { |
| 1252 | /* Save each CPU's current state. */ |
| 1253 | |
| 1254 | for_each_online_cpu(cpu) { |
| 1255 | dyntick_save_progress_counter_sched(cpu); |
| 1256 | save_qsctr_sched(cpu); |
| 1257 | } |
| 1258 | |
| 1259 | /* |
| 1260 | * Sleep for about an RCU grace-period's worth to |
| 1261 | * allow better batching and to consume less CPU. |
| 1262 | */ |
| 1263 | schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME); |
| 1264 | |
| 1265 | /* |
| 1266 | * If there was nothing to do last time, prepare to |
| 1267 | * sleep at the end of the current grace period cycle. |
| 1268 | */ |
| 1269 | couldsleep = couldsleepnext; |
| 1270 | couldsleepnext = 1; |
| 1271 | if (couldsleep) { |
| 1272 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); |
| 1273 | rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep; |
| 1274 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); |
| 1275 | } |
| 1276 | |
| 1277 | /* |
| 1278 | * Wait on each CPU in turn to have either visited |
| 1279 | * a quiescent state or been in dynticks-idle mode. |
| 1280 | */ |
| 1281 | for_each_online_cpu(cpu) { |
| 1282 | while (rcu_qsctr_inc_needed(cpu) && |
| 1283 | rcu_qsctr_inc_needed_dyntick(cpu)) { |
| 1284 | /* resched_cpu(cpu); @@@ */ |
| 1285 | schedule_timeout_interruptible(1); |
| 1286 | } |
| 1287 | } |
| 1288 | |
| 1289 | /* Advance callbacks for each CPU. */ |
| 1290 | |
| 1291 | for_each_online_cpu(cpu) { |
| 1292 | |
| 1293 | rdp = RCU_DATA_CPU(cpu); |
| 1294 | spin_lock_irqsave(&rdp->lock, flags); |
| 1295 | |
| 1296 | /* |
| 1297 | * We are running on this CPU irq-disabled, so no |
| 1298 | * CPU can go offline until we re-enable irqs. |
| 1299 | * The current CPU might have already gone |
| 1300 | * offline (between the for_each_online_cpu and |
| 1301 | * the spin_lock_irqsave), but in that case all its |
| 1302 | * callback lists will be empty, so no harm done. |
| 1303 | * |
| 1304 | * Advance the callbacks! We share normal RCU's |
| 1305 | * donelist, since callbacks are invoked the |
| 1306 | * same way in either case. |
| 1307 | */ |
| 1308 | if (rdp->waitschedlist != NULL) { |
| 1309 | *rdp->donetail = rdp->waitschedlist; |
| 1310 | rdp->donetail = rdp->waitschedtail; |
| 1311 | |
| 1312 | /* |
| 1313 | * Next rcu_check_callbacks() will |
| 1314 | * do the required raise_softirq(). |
| 1315 | */ |
| 1316 | } |
| 1317 | if (rdp->nextschedlist != NULL) { |
| 1318 | rdp->waitschedlist = rdp->nextschedlist; |
| 1319 | rdp->waitschedtail = rdp->nextschedtail; |
| 1320 | couldsleep = 0; |
| 1321 | couldsleepnext = 0; |
| 1322 | } else { |
| 1323 | rdp->waitschedlist = NULL; |
| 1324 | rdp->waitschedtail = &rdp->waitschedlist; |
| 1325 | } |
| 1326 | rdp->nextschedlist = NULL; |
| 1327 | rdp->nextschedtail = &rdp->nextschedlist; |
| 1328 | |
| 1329 | /* Mark sleep intention. */ |
| 1330 | |
| 1331 | rdp->rcu_sched_sleeping = couldsleep; |
| 1332 | |
| 1333 | spin_unlock_irqrestore(&rdp->lock, flags); |
| 1334 | } |
| 1335 | |
| 1336 | /* If we saw callbacks on the last scan, go deal with them. */ |
| 1337 | |
| 1338 | if (!couldsleep) |
| 1339 | continue; |
| 1340 | |
| 1341 | /* Attempt to block... */ |
| 1342 | |
| 1343 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); |
| 1344 | if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) { |
| 1345 | |
| 1346 | /* |
| 1347 | * Someone posted a callback after we scanned. |
| 1348 | * Go take care of it. |
| 1349 | */ |
| 1350 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); |
| 1351 | couldsleepnext = 0; |
| 1352 | continue; |
| 1353 | } |
| 1354 | |
| 1355 | /* Block until the next person posts a callback. */ |
| 1356 | |
| 1357 | rcu_ctrlblk.sched_sleep = rcu_sched_sleeping; |
| 1358 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); |
| 1359 | ret = 0; |
| 1360 | __wait_event_interruptible(rcu_ctrlblk.sched_wq, |
| 1361 | rcu_ctrlblk.sched_sleep != rcu_sched_sleeping, |
| 1362 | ret); |
| 1363 | |
| 1364 | /* |
| 1365 | * Signals would prevent us from sleeping, and we cannot |
| 1366 | * do much with them in any case. So flush them. |
| 1367 | */ |
| 1368 | if (ret) |
| 1369 | flush_signals(current); |
| 1370 | couldsleepnext = 0; |
| 1371 | |
| 1372 | } while (!kthread_should_stop()); |
| 1373 | |
| 1374 | return 0; |
| 1375 | } |
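
/*
 * Summary of the sleep handshake implemented above (a restatement of
 * the code for readability; it adds no new behavior):
 *
 *	rcu_sched_not_sleeping:	the grace-period kthread is running (or
 *				about to), so call_rcu_sched() need not
 *				wake it.
 *	rcu_sched_sleep_prep:	the kthread intends to sleep after this
 *				cycle; a callback posted now resets the
 *				state and cancels the sleep.
 *	rcu_sched_sleeping:	the kthread is blocked on sched_wq, so
 *				call_rcu_sched() resets the state and
 *				issues the wakeup.
 */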
| 1376 | |
| 1377 | /* |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1378 | * Check to see if any future RCU-related work will need to be done |
| 1379 | * by the current CPU, even if none need be done immediately, returning |
| 1380 | * 1 if so. Assumes that notifiers would take care of handling any |
| 1381 | * outstanding requests from the RCU core. |
| 1382 | * |
| 1383 | * This function is part of the RCU implementation; it is -not- |
| 1384 | * an exported member of the RCU API. |
| 1385 | */ |
| 1386 | int rcu_needs_cpu(int cpu) |
| 1387 | { |
| 1388 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); |
| 1389 | |
| 1390 | return (rdp->donelist != NULL || |
| 1391 | !!rdp->waitlistcount || |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1392 | rdp->nextlist != NULL || |
| 1393 | rdp->nextschedlist != NULL || |
| 1394 | rdp->waitschedlist != NULL); |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1395 | } |
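
/*
 * Illustrative caller (simplified): the dynticks-idle code consults
 * this before stopping the per-CPU scheduler tick, so the tick keeps
 * running while this CPU still has callbacks to push through the
 * grace-period machinery.  The helper name below is a placeholder:
 *
 *	if (!rcu_needs_cpu(smp_processor_id()))
 *		stop_the_tick_here();
 */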
| 1396 | |
| 1397 | int rcu_pending(int cpu) |
| 1398 | { |
| 1399 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); |
| 1400 | |
| 1401 | /* The CPU has at least one callback queued somewhere. */ |
| 1402 | |
| 1403 | if (rdp->donelist != NULL || |
| 1404 | !!rdp->waitlistcount || |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1405 | rdp->nextlist != NULL || |
| 1406 | rdp->nextschedlist != NULL || |
| 1407 | rdp->waitschedlist != NULL) |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1408 | return 1; |
| 1409 | |
| 1410 | /* The RCU core needs an acknowledgement from this CPU. */ |
| 1411 | |
| 1412 | if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) || |
| 1413 | (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed)) |
| 1414 | return 1; |
| 1415 | |
| 1416 | /* This CPU has fallen behind the global grace-period number. */ |
| 1417 | |
| 1418 | if (rdp->completed != rcu_ctrlblk.completed) |
| 1419 | return 1; |
| 1420 | |
| 1421 | /* Nothing needed from this CPU. */ |
| 1422 | |
| 1423 | return 0; |
| 1424 | } |
| 1425 | |
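/*
 * CPU-hotplug notifier: set up a CPU's per-CPU RCU state as it comes
 * up, and take that state back down (handing off any callbacks) if the
 * CPU fails to come up or later dies.
 */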
Paul E. McKenney | eaf649e | 2008-01-25 21:08:25 +0100 | [diff] [blame] | 1426 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
| 1427 | unsigned long action, void *hcpu) |
| 1428 | { |
| 1429 | long cpu = (long)hcpu; |
| 1430 | |
| 1431 | switch (action) { |
| 1432 | case CPU_UP_PREPARE: |
| 1433 | case CPU_UP_PREPARE_FROZEN: |
| 1434 | rcu_online_cpu(cpu); |
| 1435 | break; |
| 1436 | case CPU_UP_CANCELED: |
| 1437 | case CPU_UP_CANCELED_FROZEN: |
| 1438 | case CPU_DEAD: |
| 1439 | case CPU_DEAD_FROZEN: |
| 1440 | rcu_offline_cpu(cpu); |
| 1441 | break; |
| 1442 | default: |
| 1443 | break; |
| 1444 | } |
| 1445 | return NOTIFY_OK; |
| 1446 | } |
| 1447 | |
| 1448 | static struct notifier_block __cpuinitdata rcu_nb = { |
| 1449 | .notifier_call = rcu_cpu_notify, |
| 1450 | }; |
| 1451 | |
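/*
 * Boot-time initialization: initialize each possible CPU's rcu_data,
 * register the CPU-hotplug notifier, bring the already-online CPUs
 * online from RCU's viewpoint, and register the RCU softirq handler.
 */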
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1452 | void __init __rcu_init(void) |
| 1453 | { |
| 1454 | int cpu; |
| 1455 | int i; |
| 1456 | struct rcu_data *rdp; |
| 1457 | |
| 1458 | printk(KERN_NOTICE "Preemptible RCU implementation.\n"); |
| 1459 | for_each_possible_cpu(cpu) { |
| 1460 | rdp = RCU_DATA_CPU(cpu); |
| 1461 | spin_lock_init(&rdp->lock); |
| 1462 | rdp->completed = 0; |
| 1463 | rdp->waitlistcount = 0; |
| 1464 | rdp->nextlist = NULL; |
| 1465 | rdp->nexttail = &rdp->nextlist; |
| 1466 | for (i = 0; i < GP_STAGES; i++) { |
| 1467 | rdp->waitlist[i] = NULL; |
| 1468 | rdp->waittail[i] = &rdp->waitlist[i]; |
| 1469 | } |
| 1470 | rdp->donelist = NULL; |
| 1471 | rdp->donetail = &rdp->donelist; |
| 1472 | rdp->rcu_flipctr[0] = 0; |
| 1473 | rdp->rcu_flipctr[1] = 0; |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1474 | rdp->nextschedlist = NULL; |
| 1475 | rdp->nextschedtail = &rdp->nextschedlist; |
| 1476 | rdp->waitschedlist = NULL; |
| 1477 | rdp->waitschedtail = &rdp->waitschedlist; |
| 1478 | rdp->rcu_sched_sleeping = 0; |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1479 | } |
Paul E. McKenney | eaf649e | 2008-01-25 21:08:25 +0100 | [diff] [blame] | 1480 | register_cpu_notifier(&rcu_nb); |
| 1481 | |
| 1482 | /* |
| 1483 | * We don't need protection against CPU-Hotplug here |
| 1484 | * since |
| 1485 | * a) If a CPU comes online while we are iterating over the |
Rusty Russell | bd232f9 | 2009-01-01 10:12:26 +1030 | [diff] [blame] | 1486 | * cpu_online_mask below, we would only end up making a |
Paul E. McKenney | eaf649e | 2008-01-25 21:08:25 +0100 | [diff] [blame] | 1487 | * duplicate call to rcu_online_cpu() which sets the corresponding |
| 1488 | * CPU's bit in rcu_cpu_online_map. |
| 1489 | * |
| 1490 | * b) A CPU cannot go offline at this point in time since the user |
| 1491 | * does not have access to the sysfs interface, nor do we |
| 1492 | * suspend the system. |
| 1493 | */ |
| 1494 | for_each_online_cpu(cpu) |
| 1495 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu); |
| 1496 | |
Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 1497 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1498 | } |
| 1499 | |
| 1500 | /* |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1501 | * Late-boot-time RCU initialization that must wait until after the scheduler |
| 1502 | * has been initialized. |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1503 | */ |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1504 | void __init rcu_init_sched(void) |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1505 | { |
Paul E. McKenney | 4446a36 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 1506 | rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period, |
| 1507 | NULL, |
| 1508 | "rcu_sched_grace_period"); |
| 1509 | WARN_ON(IS_ERR(rcu_sched_grace_period_task)); |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 1510 | } |
| 1511 | |
| 1512 | #ifdef CONFIG_RCU_TRACE |
| 1513 | long *rcupreempt_flipctr(int cpu) |
| 1514 | { |
| 1515 | return &RCU_DATA_CPU(cpu)->rcu_flipctr[0]; |
| 1516 | } |
| 1517 | EXPORT_SYMBOL_GPL(rcupreempt_flipctr); |
| 1518 | |
| 1519 | int rcupreempt_flip_flag(int cpu) |
| 1520 | { |
| 1521 | return per_cpu(rcu_flip_flag, cpu); |
| 1522 | } |
| 1523 | EXPORT_SYMBOL_GPL(rcupreempt_flip_flag); |
| 1524 | |
| 1525 | int rcupreempt_mb_flag(int cpu) |
| 1526 | { |
| 1527 | return per_cpu(rcu_mb_flag, cpu); |
| 1528 | } |
| 1529 | EXPORT_SYMBOL_GPL(rcupreempt_mb_flag); |
| 1530 | |
| 1531 | char *rcupreempt_try_flip_state_name(void) |
| 1532 | { |
| 1533 | return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state]; |
| 1534 | } |
| 1535 | EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name); |
| 1536 | |
| 1537 | struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu) |
| 1538 | { |
| 1539 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); |
| 1540 | |
| 1541 | return &rdp->trace; |
| 1542 | } |
| 1543 | EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu); |
| 1544 | |
| 1545 | #endif /* #ifdef CONFIG_RCU_TRACE */ |