/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);

EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;
	set_need_resched();
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
	local_irq_restore(flags);
}
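
/*
 * Illustrative usage sketch for call_rcu() (not part of this file;
 * struct foo, foo_reclaim() and foo_remove() are hypothetical).  A
 * writer, holding its update-side lock, unlinks an element and defers
 * freeing it until all current readers have finished:
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	static void foo_remove(struct foo *p)
 *	{
 *		list_del_rcu(&p->list);
 *		call_rcu(&p->rcu, foo_reclaim);
 *	}
 */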

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_bh_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
	}

	local_irq_restore(flags);
}
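
/*
 * Illustrative reader sketch for the _bh flavor (not part of this
 * file; gp and do_something_with() are hypothetical).  Process-context
 * readers must use the _bh variants so the read side also excludes
 * softirqs:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock_bh();
 */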

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}

/*
 * Return the number of RCU bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_head *head;

	head = &rdp->barrier;
	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all the in-flight RCU callbacks are complete.
 */
void rcu_barrier(void)
{
	BUG_ON(in_interrupt());
	/* Take cpucontrol mutex to protect against CPU hotplug */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	atomic_set(&rcu_barrier_cpu_count, 0);
	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
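
/*
 * Illustrative usage sketch for rcu_barrier() (not part of this file;
 * foo_exit() and foo_stop_new_callbacks() are hypothetical).  A module
 * that uses call_rcu() must drain its outstanding callbacks before it
 * is unloaded, or they would run after the module text is gone:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_new_callbacks();
 *		rcu_barrier();
 *	}
 */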

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = list->next;
		prefetch(next);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}
	rdp->donelist = list;

	local_irq_disable();
	rdp->qlen -= count;
	local_irq_enable();
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, they update rcu_ctrlblk.cpumask. Once
 *   the bitmap is empty, the grace period is completed.
 *   rcu_check_quiescent_state then calls rcu_start_batch() (via cpu_quiet())
 *   to start the next grace period, if necessary.
 */
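
/*
 * Worked example (illustrative, not from the original source): assume
 * cpus 0 and 1 are online.  rcu_start_batch() advances rcp->cur to N
 * and sets rcp->cpumask to {0, 1}.  On its next pass through
 * rcu_check_quiescent_state(), each cpu sees rdp->quiescbatch != N and
 * starts watching for a quiescent state.  When cpu 0 passes through
 * one (e.g. a context switch), cpu_quiet() clears it from the mask;
 * when cpu 1 follows, the mask goes empty, rcp->completed becomes N,
 * and the callbacks queued for batch N can be moved to the donelists.
 */
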
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/*
		 * next_pending == 0 must be visible in
		 * __rcu_process_callbacks() before it can see new value of cur.
		 */
		smp_wmb();
		rcp->cur++;

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier.  Otherwise it can cause tickless idle cpus to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

		rcp->signaled = 0;
	}
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
}


#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * locking requirements, the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * the rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}

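/*
 * Summary (added commentary, derived from the checks below): this is
 * invoked from the scheduler-tick path.  A tick that arrived from user
 * mode, or from the idle loop while not in a softirq and not nested
 * inside another hardirq, proves this cpu holds no rcu_read_lock(), so
 * both rcu and rcu_bh quiescent states are recorded.  A tick outside
 * of softirq context still counts as an rcu_bh quiescent state.  The
 * per-cpu tasklet is then scheduled to push the state machine along.
 */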
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Because of the FASTCALL declaration of complete(), we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}
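
/*
 * Illustrative usage sketch for synchronize_rcu() (not part of this
 * file; gp and struct foo are hypothetical).  A writer that may block
 * can publish a new version and free the old one directly, with no
 * rcu_head needed:
 *
 *	static void foo_update(struct foo *newp)
 *	{
 *		struct foo *old = gp;
 *
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_rcu();
 *		kfree(old);
 *	}
 */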

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
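
/*
 * Tuning sketch (illustrative; assumes this code is built in, so the
 * parameters take the "rcupdate." prefix on the kernel command line):
 *
 *	rcupdate.blimit=20 rcupdate.qhimark=20000 rcupdate.qlowmark=200
 *
 * blimit bounds how many callbacks rcu_do_batch() runs per pass;
 * qhimark is the queue length at which that bound is lifted and a
 * quiescent state is forced; qlowmark is where the bound is restored.
 */
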
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);