/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);
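
/*
 * Illustrative usage, not from this file: local_bh_disable() and
 * local_bh_enable() bracket process-context code that touches data
 * also accessed from softirq context on the same CPU. The names
 * my_list, tmp and my_process_packets() below are hypothetical:
 *
 *	local_bh_disable();
 *	list_splice_init(&my_list, &tmp);	// softirqs cannot run here
 *	local_bh_enable();			// may run pending softirqs
 *	my_process_packets(&tmp);
 */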

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
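
/*
 * Illustrative usage, not from this file: a subsystem registers its
 * handler once at init time and then raises the softirq whenever
 * there is work to do. The networking core, for example, registers
 * net_tx_action this way in net/core/dev.c:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq_irqoff(NET_TX_SOFTIRQ);	// with irqs off
 */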

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
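
/*
 * Illustrative driver-side usage (hypothetical names): a tasklet is
 * typically set up once, scheduled from the hard-irq handler, and
 * its function then runs later in softirq context on the same CPU:
 *
 *	static void my_tasklet_fn(unsigned long data) { ... }
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	// in the interrupt handler:
 *	tasklet_schedule(&my_tasklet);
 */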

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
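
/*
 * Illustrative teardown (hypothetical names): call tasklet_kill() from
 * process context, after making sure nothing can re-schedule the
 * tasklet, e.g. in a driver's remove path:
 *
 *	free_irq(dev->irq, dev);	// no more scheduling from the handler
 *	tasklet_kill(&my_tasklet);	// wait for a running instance to finish
 */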

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
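
/*
 * Illustrative usage sketch (hypothetical names): a caller embeds the
 * call_single_data in its own work item, asks for the matching softirq
 * to run on another CPU, and the softirq handler on that CPU later
 * walks its softirq_work_list[] entry, recovering the work item with
 * container_of():
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	send_remote_softirq(&work->csd, target_cpu, BLOCK_SOFTIRQ);
 */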

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
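
/*
 * Illustrative usage (hypothetical function name): run a callback on
 * every online CPU and wait for all of them to finish:
 *
 *	static void my_flush(void *info) { ... }
 *	...
 *	on_each_cpu(my_flush, NULL, 1);	// wait == 1: block until done
 */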
#endif

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}