blob: 87508886cbbdadb28d2843047cdbbb90e7656b77 [file] [log] [blame]
/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2006 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
15
16#include <linux/module.h>
17#include <linux/seq_file.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/kernel_stat.h>
21
22#include <asm/uaccess.h>
23#include <asm/platform.h>
24
/*
 * Software copy of the INTENABLE special register; updated by the
 * mask/unmask callbacks below and written back with set_sr().
 */
static unsigned int cached_irq_mask;

/* Spurious/erroneous interrupt count, shown in /proc/interrupts as "ERR". */
atomic_t irq_err_count;
28
29/*
Chris Zankel5a0015d2005-06-23 22:01:16 -070030 * do_IRQ handles all normal device IRQ's (the special
31 * SMP cross-CPU interrupts have their own specific
32 * handlers).
33 */
34
Chris Zankelfd43fe12006-12-10 02:18:47 -080035asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
Chris Zankel5a0015d2005-06-23 22:01:16 -070036{
Chris Zankelfd43fe12006-12-10 02:18:47 -080037 struct pt_regs *old_regs = set_irq_regs(regs);
38 struct irq_desc *desc = irq_desc + irq;
39
40 if (irq >= NR_IRQS) {
41 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
Harvey Harrison1b532c62008-07-30 12:48:54 -070042 __func__, irq);
Chris Zankelfd43fe12006-12-10 02:18:47 -080043 }
44
Chris Zankel5a0015d2005-06-23 22:01:16 -070045 irq_enter();
46
47#ifdef CONFIG_DEBUG_STACKOVERFLOW
48 /* Debugging check for stack overflow: is there less than 1KB free? */
49 {
50 unsigned long sp;
51
52 __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
53 sp &= THREAD_SIZE - 1;
54
55 if (unlikely(sp < (sizeof(thread_info) + 1024)))
56 printk("Stack overflow in do_IRQ: %ld\n",
57 sp - sizeof(struct thread_info));
58 }
59#endif
Chris Zankelfd43fe12006-12-10 02:18:47 -080060 desc->handle_irq(irq, desc);
Chris Zankel5a0015d2005-06-23 22:01:16 -070061
62 irq_exit();
Chris Zankelfd43fe12006-12-10 02:18:47 -080063 set_irq_regs(old_regs);
Chris Zankel5a0015d2005-06-23 22:01:16 -070064}
65
66/*
67 * Generic, controller-independent functions:
68 */
69
/*
 * /proc/interrupts printout: one row per IRQ with per-CPU counts,
 * then NMI and spurious-interrupt ("ERR") summary rows.  Called by
 * the seq_file machinery with *v as the row index.
 * NOTE(review): literal spacing in the header/format strings looks
 * collapsed in this copy of the source — confirm column widths
 * against upstream before relying on the exact output layout.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Row 0 also carries the column header listing the online CPUs. */
	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		/* Lock the descriptor so the action list and chip stay stable. */
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* no handler installed: emit nothing */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, " %s", action->name);

		/* Shared IRQ: append every additional action, comma-separated. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		/* Summary rows after the last IRQ. */
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}
Chris Zankel5a0015d2005-06-23 22:01:16 -0700113
Chris Zankelfd43fe12006-12-10 02:18:47 -0800114static void xtensa_irq_mask(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700115{
116 cached_irq_mask &= ~(1 << irq);
117 set_sr (cached_irq_mask, INTENABLE);
118}
119
Chris Zankelfd43fe12006-12-10 02:18:47 -0800120static void xtensa_irq_unmask(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700121{
122 cached_irq_mask |= 1 << irq;
123 set_sr (cached_irq_mask, INTENABLE);
124}
125
/*
 * Full enable for @irq: let the board/variant code set up the line
 * first, then unmask it in the core controller.
 */
static void xtensa_irq_enable(unsigned int irq)
{
	variant_irq_enable(irq);
	xtensa_irq_unmask(irq);
}
131
/*
 * Full disable for @irq: mask it in the core controller first, then
 * let the board/variant code tear the line down (mirror of enable).
 */
static void xtensa_irq_disable(unsigned int irq)
{
	xtensa_irq_mask(irq);
	variant_irq_disable(irq);
}
137
Chris Zankelfd43fe12006-12-10 02:18:47 -0800138static void xtensa_irq_ack(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700139{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800140 set_sr(1 << irq, INTCLEAR);
Chris Zankel5a0015d2005-06-23 22:01:16 -0700141}
142
Chris Zankelfd43fe12006-12-10 02:18:47 -0800143static int xtensa_irq_retrigger(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700144{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800145 set_sr (1 << irq, INTSET);
146 return 1;
Chris Zankel5a0015d2005-06-23 22:01:16 -0700147}
148
Chris Zankel5a0015d2005-06-23 22:01:16 -0700149
/*
 * The built-in Xtensa interrupt controller, driven through the
 * INTENABLE/INTCLEAR/INTSET special registers by the callbacks above.
 */
static struct irq_chip xtensa_irq_chip = {
	.name		= "xtensa",
	.enable		= xtensa_irq_enable,
	.disable	= xtensa_irq_disable,
	.mask		= xtensa_irq_mask,
	.unmask		= xtensa_irq_unmask,
	.ack		= xtensa_irq_ack,
	.retrigger	= xtensa_irq_retrigger,
};
Chris Zankel5a0015d2005-06-23 22:01:16 -0700159
160void __init init_IRQ(void)
161{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800162 int index;
Chris Zankel5a0015d2005-06-23 22:01:16 -0700163
Chris Zankelfd43fe12006-12-10 02:18:47 -0800164 for (index = 0; index < XTENSA_NR_IRQS; index++) {
165 int mask = 1 << index;
166
167 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
168 set_irq_chip_and_handler(index, &xtensa_irq_chip,
169 handle_simple_irq);
170
171 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
172 set_irq_chip_and_handler(index, &xtensa_irq_chip,
173 handle_edge_irq);
174
175 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
176 set_irq_chip_and_handler(index, &xtensa_irq_chip,
177 handle_level_irq);
178
179 else if (mask & XCHAL_INTTYPE_MASK_TIMER)
180 set_irq_chip_and_handler(index, &xtensa_irq_chip,
181 handle_edge_irq);
182
183 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
184 /* XCHAL_INTTYPE_MASK_NMI */
185
186 set_irq_chip_and_handler(index, &xtensa_irq_chip,
187 handle_level_irq);
188 }
Chris Zankel5a0015d2005-06-23 22:01:16 -0700189
190 cached_irq_mask = 0;
Daniel Glöckner1beee212009-05-05 15:03:21 +0000191
192 variant_init_irq();
Chris Zankel5a0015d2005-06-23 22:01:16 -0700193}