/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/kernel.h"
#include "linux/module.h"
#include "linux/smp.h"
#include "linux/kernel_stat.h"
#include "linux/interrupt.h"
#include "linux/random.h"
#include "linux/slab.h"
#include "linux/file.h"
#include "linux/proc_fs.h"
#include "linux/init.h"
#include "linux/seq_file.h"
#include "linux/profile.h"
#include "linux/hardirq.h"
#include "asm/irq.h"
#include "asm/hw_irq.h"
#include "asm/atomic.h"
#include "asm/signal.h"
#include "asm/system.h"
#include "asm/errno.h"
#include "asm/uaccess.h"
#include "kern_util.h"
#include "irq_user.h"
#include "irq_kern.h"
#include "os.h"
#include "sigio.h"
#include "um_malloc.h"
#include "misc_constants.h"
#include "as-layout.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_putc(p, '\n');
	}

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

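/*
 * SIGIO handler - ask the os layer which descriptors have pending
 * events, run do_IRQ for each of them, then free any IRQs that were
 * queued for removal while the handler ran.
 */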
void sigio_handler(int sig, union uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

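/*
 * Register an fd/IRQ pair - mark the fd async, add it to active_fds,
 * and grow the pollfd array used by os_waiting_for_events().  irq_lock
 * has to be dropped around kmalloc(), which may sleep, so the pollfd
 * resize is retried until it succeeds under the lock.
 */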
int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int pid, events, err, n;

	pid = os_getpid();
	err = os_set_fd_async(fd, pid);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .pid		= pid,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk("Registering fd %d twice\n", fd);
			printk("Irqs : %d, %d\n", irq_fd->irq, irq);
			printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means that the new pollfd doesn't fit into the
		 * current pollfds array, and tmp_pfd is either NULL or
		 * too small to hold the resized array; at least n bytes
		 * are needed.
		 *
		 * The lock has to be dropped here in order to call
		 * kmalloc, which might sleep.  If something else comes in
		 * and changes the pollfds array so that the new entry
		 * still doesn't fit, the buffer is freed and we try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/* This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

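/*
 * Walk active_fds under irq_lock and remove every entry that the
 * callback matches; the actual list surgery is done by
 * os_free_irq_by_cb.
 */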
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
							  .dev  = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk("find_irq_by_fd - mismatch between active_fds and "
		       "pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

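/*
 * Re-arm a descriptor after its event has been handled - put the fd
 * back into the pollfd array and into the set of fds watched for SIGIO.
 */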
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

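/*
 * Stop watching a descriptor - clear its pollfd slot and tell the
 * SIGIO machinery to ignore it.
 */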
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

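/*
 * TT mode only - transfer SIGIO ownership of every watched descriptor
 * to the given pid.
 */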
#ifdef CONFIG_MODE_TT
void forward_interrupts(int pid)
{
	struct irq_fd *irq;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&irq_lock, flags);
	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_set_owner(irq->fd, pid);
		if (err < 0) {
			/* XXX Just remove the irq rather than
			 * print out an infinite stream of these
			 */
			printk("Failed to forward %d to pid %d, err = %d\n",
			       irq->fd, pid, -err);
		}

		irq->pid = pid;
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}
#endif

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

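/*
 * Wrapper around request_irq() that also registers a host file
 * descriptor (if fd != -1) as the event source for the IRQ.
 */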
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char * devname,
		   void *dev_id)
{
	int err;

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err)
		return err;

	if (fd != -1)
		err = activate_fd(irq, fd, type, dev_id);
	return err;
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

/* hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end */
static void dummy(unsigned int irq)
{
}

/* This is used for everything else than the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

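/* The timer IRQ is driven by SIGVTALRM and gets its own irq type. */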
static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

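/*
 * Set up the irq_desc entries - the timer IRQ uses SIGVTALRM_irq_type,
 * everything else uses normal_irq_type (SIGIO).
 */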
void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}

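/*
 * Create a pipe, request the given IRQ for its read end, and return
 * the write end of the pipe (or a negative error).
 */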
int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err) {
		printk("init_aio_irq - os_pipe failed, err = %d\n", -err);
		goto out;
	}

	err = um_request_irq(irq, fds[0], IRQ_READ, handler,
			     IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
			     (void *) (long) fds[0]);
	if (err) {
		printk("init_aio_irq - um_request_irq failed, err = %d\n",
		       err);
		goto out_close;
	}

	err = fds[1];
	goto out;

 out_close:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
 out:
	return err;
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal's bit into pending_mask (defined
 * below).  If the value that comes back is zero, then there is no
 * setup in progress, and the interrupt can be handled.  If the value
 * is non-zero, then there is stack setup in progress.  In order to
 * have the interrupt handled, we leave our signal in the mask, and it
 * will be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

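/*
 * Bits of signals that arrived while stack setup was in progress;
 * manipulated only with xchg by to_irq_stack and from_irq_stack.
 */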
static unsigned long pending_mask;

unsigned long to_irq_stack(int sig, unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, 1 << sig);
	if (mask != 0) {
		/* If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = 1 << sig;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);
		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}