| /* |
| * Read-Copy Update mechanism for mutual exclusion |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License as published by |
| * the Free Software Foundation; either version 2 of the License, or |
| * (at your option) any later version. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, write to the Free Software |
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301, USA.
| * |
| * Copyright IBM Corporation, 2001 |
| * |
| * Authors: Dipankar Sarma <dipankar@in.ibm.com> |
| * Manfred Spraul <manfred@colorfullife.com> |
| * |
| * Based on the original work by Paul McKenney <paulmck@us.ibm.com> |
| * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
| * Papers: |
| * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf |
| * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) |
| * |
| * For detailed explanation of Read-Copy Update mechanism see - |
| * http://lse.sourceforge.net/locking/rcupdate.html |
| * |
| */ |
| #include <linux/types.h> |
| #include <linux/kernel.h> |
| #include <linux/init.h> |
| #include <linux/spinlock.h> |
| #include <linux/smp.h> |
| #include <linux/interrupt.h> |
| #include <linux/sched.h> |
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
| |
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep map covering RCU read-side critical sections, allowing lockdep
 * to track rcu_read_lock()/rcu_read_unlock() nesting and report misuse.
 */
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
| |
/*
 * Nonzero once the scheduler is running, at which point the synchronous
 * grace-period primitives must actually wait for a grace period rather
 * than returning immediately.
 */
int rcu_scheduler_active __read_mostly;
| |
| /* |
| * Awaken the corresponding synchronize_rcu() instance now that a |
| * grace period has elapsed. |
| */ |
| void wakeme_after_rcu(struct rcu_head *head) |
| { |
| struct rcu_synchronize *rcu; |
| |
| rcu = container_of(head, struct rcu_synchronize, head); |
| complete(&rcu->completion); |
| } |
| |
| #ifndef CONFIG_TINY_RCU |
| |
| #ifdef CONFIG_TREE_PREEMPT_RCU |
| |
| /** |
| * synchronize_rcu - wait until a grace period has elapsed. |
| * |
| * Control will return to the caller some time after a full grace |
| * period has elapsed, in other words after all currently executing RCU |
| * read-side critical sections have completed. RCU read-side critical |
| * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
| * and may be nested. |
| */ |
| void synchronize_rcu(void) |
| { |
| struct rcu_synchronize rcu; |
| |
| if (!rcu_scheduler_active) |
| return; |
| |
| init_completion(&rcu.completion); |
	/* Will wake us up once the grace period has elapsed. */
| call_rcu(&rcu.head, wakeme_after_rcu); |
| /* Wait for it. */ |
| wait_for_completion(&rcu.completion); |
| } |
| EXPORT_SYMBOL_GPL(synchronize_rcu); |
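
/*
 * The canonical synchronous-update pattern pairs synchronize_rcu() with
 * rcu_read_lock()/rcu_read_unlock() on the read side.  An illustrative
 * sketch, assuming a hypothetical RCU-protected list of struct foo
 * guarded on the update side by a hypothetical foo_lock:
 *
 *	void foo_del(struct foo *fp)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&fp->list);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(fp);
 *	}
 *
 * Once synchronize_rcu() returns, every reader that might have obtained
 * a reference to fp has exited its critical section, so the kfree() is
 * safe.
 */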
| |
| #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
| |
| /** |
| * synchronize_sched - wait until an rcu-sched grace period has elapsed. |
| * |
| * Control will return to the caller some time after a full rcu-sched |
| * grace period has elapsed, in other words after all currently executing |
| * rcu-sched read-side critical sections have completed. These read-side |
| * critical sections are delimited by rcu_read_lock_sched() and |
| * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), |
| * local_irq_disable(), and so on may be used in place of |
| * rcu_read_lock_sched(). |
| * |
 * This means that all preempt_disable() code sequences, including NMI and
| * hardware-interrupt handlers, in progress on entry will have completed |
| * before this primitive returns. However, this does not guarantee that |
| * softirq handlers will have completed, since in some kernels, these |
| * handlers can run in process context, and can block. |
| * |
| * This primitive provides the guarantees made by the (now removed) |
| * synchronize_kernel() API. In contrast, synchronize_rcu() only |
| * guarantees that rcu_read_lock() sections will have completed. |
| * In "classic RCU", these two guarantees happen to be one and |
| * the same, but can differ in realtime RCU implementations. |
| */ |
| void synchronize_sched(void) |
| { |
| struct rcu_synchronize rcu; |
| |
| if (rcu_blocking_is_gp()) |
| return; |
| |
| init_completion(&rcu.completion); |
	/* Will wake us up once the grace period has elapsed. */
| call_rcu_sched(&rcu.head, wakeme_after_rcu); |
| /* Wait for it. */ |
| wait_for_completion(&rcu.completion); |
| } |
| EXPORT_SYMBOL_GPL(synchronize_sched); |
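
/*
 * A sketch of the rcu-sched pairing described above, with hypothetical
 * names throughout and update-side mutual exclusion omitted for
 * brevity.  The reader could equally use rcu_read_lock_sched() in
 * place of preempt_disable():
 *
 *	static struct foo *gp;
 *
 *	int reader(void)
 *	{
 *		struct foo *p;
 *		int val;
 *
 *		preempt_disable();
 *		p = rcu_dereference(gp);
 *		val = p ? p->data : -1;
 *		preempt_enable();
 *		return val;
 *	}
 *
 *	void updater(struct foo *newp)
 *	{
 *		struct foo *oldp = gp;
 *
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_sched();
 *		kfree(oldp);
 *	}
 */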
| |
| /** |
| * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. |
| * |
| * Control will return to the caller some time after a full rcu_bh grace |
| * period has elapsed, in other words after all currently executing rcu_bh |
| * read-side critical sections have completed. RCU read-side critical |
| * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), |
| * and may be nested. |
| */ |
| void synchronize_rcu_bh(void) |
| { |
| struct rcu_synchronize rcu; |
| |
| if (rcu_blocking_is_gp()) |
| return; |
| |
| init_completion(&rcu.completion); |
	/* Will wake us up once the grace period has elapsed. */
| call_rcu_bh(&rcu.head, wakeme_after_rcu); |
| /* Wait for it. */ |
| wait_for_completion(&rcu.completion); |
| } |
| EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
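
/*
 * The rcu_bh flavor is typically used when readers run in softirq
 * context, for example in network receive handlers.  A sketch with
 * hypothetical names, again omitting update-side locking:
 *
 *	static struct foo *gp;
 *
 *	int reader_in_softirq(void)
 *	{
 *		struct foo *p;
 *		int val;
 *
 *		rcu_read_lock_bh();
 *		p = rcu_dereference(gp);
 *		val = p ? p->data : -1;
 *		rcu_read_unlock_bh();
 *		return val;
 *	}
 *
 *	void updater(struct foo *newp)
 *	{
 *		struct foo *oldp = gp;
 *
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_rcu_bh();
 *		kfree(oldp);
 *	}
 */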
| |
| #endif /* #ifndef CONFIG_TINY_RCU */ |
| |
/*
 * Forward CPU-hotplug notifications to the underlying RCU
 * implementation's rcu_cpu_notify().
 */
static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
| unsigned long action, void *hcpu) |
| { |
| return rcu_cpu_notify(self, action, hcpu); |
| } |
| |
| void __init rcu_init(void) |
| { |
| int i; |
| |
| __rcu_init(); |
| cpu_notifier(rcu_barrier_cpu_hotplug, 0); |
| |
| /* |
| * We don't need protection against CPU-hotplug here because |
| * this is called early in boot, before either interrupts |
| * or the scheduler are operational. |
| */ |
| for_each_online_cpu(i) |
| rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i); |
| } |
| |
/*
 * Mark the beginning of scheduler operation.  Before this point there
 * can be only one task, so grace periods are trivially zero-length and
 * synchronize_rcu() may return immediately.
 */
void rcu_scheduler_starting(void)
| { |
| WARN_ON(num_online_cpus() != 1); |
| WARN_ON(nr_context_switches() > 0); |
| rcu_scheduler_active = 1; |
| } |