// SPDX-License-Identifier: GPL-2.0
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic
 * x86-specific bits where generic specifics make any sense at all.
 */
static void init_8259A(int auto_eoi);

static bool pcat_compat __ro_after_init;
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the IRQ mask for both 8259A interrupt controllers.
 */
unsigned int cached_irq_mask = 0xffff;
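/*
 * Editorial note (illustrative, not part of the original file): the
 * cached_master_mask and cached_slave_mask symbols used throughout this
 * file come from <asm/i8259.h> and are simply the low and high byte of
 * cached_irq_mask, roughly equivalent to:
 *
 *        #define cached_master_mask  (((unsigned char *)&cached_irq_mask)[0])
 *        #define cached_slave_mask   (((unsigned char *)&cached_irq_mask)[1])
 */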

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;
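/*
 * Usage sketch (illustrative only; isa_irq_uses_ioapic() is a hypothetical
 * helper, not part of this file): code that needs to know whether a legacy
 * IRQ line is serviced by the IO-APIC rather than by the 8259A can simply
 * test the corresponding bit:
 *
 *        static inline bool isa_irq_uses_ioapic(unsigned int irq)
 *        {
 *                return irq < NR_IRQS_LEGACY && (io_apic_irqs & (1UL << irq));
 *        }
 */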

static void mask_8259A_irq(unsigned int irq)
{
        unsigned int mask = 1 << irq;
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask |= mask;
        if (irq & 8)
                outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
                outb(cached_master_mask, PIC_MASTER_IMR);
        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void disable_8259A_irq(struct irq_data *data)
{
        mask_8259A_irq(data->irq);
}

static void unmask_8259A_irq(unsigned int irq)
{
        unsigned int mask = ~(1 << irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask &= mask;
        if (irq & 8)
                outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
                outb(cached_master_mask, PIC_MASTER_IMR);
        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void enable_8259A_irq(struct irq_data *data)
{
        unmask_8259A_irq(data->irq);
}

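/*
 * Illustrative note (not in the original source): disable_8259A_irq() and
 * enable_8259A_irq() are not called directly; they are wired into the
 * i8259A_chip irq_chip below, and the generic irq core invokes them
 * through the chip callbacks, roughly along the lines of:
 *
 *        desc->irq_data.chip->irq_mask(&desc->irq_data);
 *        desc->irq_data.chip->irq_unmask(&desc->irq_data);
 */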
static int i8259A_irq_pending(unsigned int irq)
{
        unsigned int mask = 1 << irq;
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&i8259A_lock, flags);
        if (irq < 8)
                ret = inb(PIC_MASTER_CMD) & mask;
        else
                ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        return ret;
}

static void make_8259A_irq(unsigned int irq)
{
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1 << irq);
        irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
        irq_set_status_flags(irq, IRQ_LEVEL);
        enable_irq(irq);
}

/*
 * This function is expected to be called rarely, since switching
 * between 8259A registers is slow.
 * The caller must hold the irq controller spinlock (i8259A_lock)
 * before calling it.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
        int value;
        int irqmask = 1 << irq;

        if (irq < 8) {
                outb(0x0B, PIC_MASTER_CMD);     /* ISR register */
                value = inb(PIC_MASTER_CMD) & irqmask;
                outb(0x0A, PIC_MASTER_CMD);     /* back to the IRR register */
                return value;
        }
        outb(0x0B, PIC_SLAVE_CMD);      /* ISR register */
        value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
        outb(0x0A, PIC_SLAVE_CMD);      /* back to the IRR register */
        return value;
}
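/*
 * Background note (illustrative, not part of the original file): 0x0B and
 * 0x0A above are 8259A OCW3 "read register" commands - writing 0x0B to the
 * command port selects the In-Service Register (ISR) for the next read,
 * while 0x0A selects the Interrupt Request Register (IRR), which is the
 * default that the code restores before returning.
 */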

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI), and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(struct irq_data *data)
{
        unsigned int irq = data->irq;
        unsigned int irqmask = 1 << irq;
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259A_lock, flags);
        /*
         * Lightweight spurious IRQ detection. We do not want
         * to overdo spurious IRQ handling - it's usually a sign
         * of hardware problems, so we only do the checks we can
         * do without slowing down good hardware unnecessarily.
         *
         * Note that IRQ7 and IRQ15 (the two spurious IRQs
         * usually resulting from the 8259A-1|2 PICs) occur
         * even if the IRQ is masked in the 8259A. Thus we
         * can check spurious 8259A IRQs without doing the
         * quite slow i8259A_irq_real() call for every IRQ.
         * This does not cover 100% of spurious interrupts,
         * but should be enough to warn the user that there
         * is something bad going on ...
         */
        if (cached_irq_mask & irqmask)
                goto spurious_8259A_irq;
        cached_irq_mask |= irqmask;

handle_real_irq:
        if (irq & 8) {
                inb(PIC_SLAVE_IMR);     /* DUMMY - (do we need this?) */
                outb(cached_slave_mask, PIC_SLAVE_IMR);
                /* 'Specific EOI' to slave */
                outb(0x60 + (irq & 7), PIC_SLAVE_CMD);
                /* 'Specific EOI' to master-IRQ2 */
                outb(0x60 + PIC_CASCADE_IR, PIC_MASTER_CMD);
        } else {
                inb(PIC_MASTER_IMR);    /* DUMMY - (do we need this?) */
                outb(cached_master_mask, PIC_MASTER_IMR);
                /* 'Specific EOI' to master */
                outb(0x60 + irq, PIC_MASTER_CMD);
        }
        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
        return;

spurious_8259A_irq:
        /*
         * This is the slow path - it should happen rarely.
         */
        if (i8259A_irq_real(irq))
                /*
                 * Oops, the IRQ _is_ in service according to the
                 * 8259A - not spurious, go handle it.
                 */
                goto handle_real_irq;

        {
                static int spurious_irq_mask;
                /*
                 * At this point we can be sure the IRQ is spurious,
                 * let's ACK and report it. [once per IRQ]
                 */
                if (!(spurious_irq_mask & irqmask)) {
                        printk_deferred(KERN_DEBUG
                                "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
                atomic_inc(&irq_err_count);
                /*
                 * Theoretically we do not have to handle this IRQ,
                 * but in Linux this does not cause problems and is
                 * simpler for us.
                 */
                goto handle_real_irq;
        }
}
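/*
 * Background note (illustrative, not part of the original file): the
 * 0x60 + n values written above are OCW2 "specific EOI" commands, where
 * the low three bits select the interrupt level being acknowledged on
 * that particular 8259A. For a slave IRQ the EOI must go to the slave
 * first and then to the master's cascade input (IR2), which is what the
 * two outb() calls in the (irq & 8) branch do.
 */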

struct irq_chip i8259A_chip = {
        .name           = "XT-PIC",
        .irq_mask       = disable_8259A_irq,
        .irq_disable    = disable_8259A_irq,
        .irq_unmask     = enable_8259A_irq,
        .irq_mask_ack   = mask_and_ack_8259A,
};

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
        outb(trigger[0], 0x4d0);
        outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
        /* IRQ 0,1,2,8,13 are marked as reserved */
        trigger[0] = inb(0x4d0) & 0xF8;
        trigger[1] = inb(0x4d1) & 0xDE;
}
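/*
 * Background note (illustrative, not part of the original file): each ELCR
 * bit corresponds to one IRQ line (bit n of 0x4d0 for IRQ n, bit n of 0x4d1
 * for IRQ 8+n), with '1' meaning level-triggered and '0' edge-triggered.
 * The masks above drop the reserved lines: 0xF8 clears bits 0-2 (IRQ 0-2)
 * and 0xDE clears bits 0 and 5 (IRQ 8 and IRQ 13).
 */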

static void i8259A_resume(void)
{
        init_8259A(i8259A_auto_eoi);
        restore_ELCR(irq_trigger);
}

static int i8259A_suspend(void)
{
        save_ELCR(irq_trigger);
        return 0;
}

static void i8259A_shutdown(void)
{
        /*
         * Put the i8259A into a quiescent state that
         * the kernel initialization code can get it
         * out of.
         */
        outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
        outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
}

static struct syscore_ops i8259_syscore_ops = {
        .suspend        = i8259A_suspend,
        .resume         = i8259A_resume,
        .shutdown       = i8259A_shutdown,
};

static void mask_8259A(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
        outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static void unmask_8259A(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
        outb(cached_slave_mask, PIC_SLAVE_IMR);   /* restore slave IRQ mask */

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}

static int probe_8259A(void)
{
        unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
        unsigned long flags;

        /*
         * If MADT has the PCAT_COMPAT flag set, then do not bother probing
         * for the PIC. Some BIOSes leave the PIC uninitialized and probing
         * fails.
         *
         * Right now this causes problems as quite some code depends on
         * nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
         * when the system has an IO/APIC because then PIC is not required
         * at all, except for really old machines where the timer interrupt
         * must be routed through the PIC. So just pretend that the PIC is
         * there and let legacy_pic->init() initialize it for nothing.
         *
         * Alternatively this could just try to initialize the PIC and
         * repeat the probe, but for cases where there is no PIC that's
         * just pointless.
         */
        if (pcat_compat)
                return nr_legacy_irqs();

        /*
         * Check to see if we have a PIC. Mask all except the cascade and
         * read back the value we just wrote. If we don't have a PIC, we
         * will read 0xff as opposed to the value we wrote.
         */
        raw_spin_lock_irqsave(&i8259A_lock, flags);

        outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
        outb(probe_val, PIC_MASTER_IMR);
        new_val = inb(PIC_MASTER_IMR);
        if (new_val != probe_val) {
                printk(KERN_INFO "Using NULL legacy PIC\n");
                legacy_pic = &null_legacy_pic;
        }

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
        return nr_legacy_irqs();
}

static void init_8259A(int auto_eoi)
{
        unsigned long flags;

        i8259A_auto_eoi = auto_eoi;

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */

        /*
         * outb_pic - this has to work on a wide range of PC hardware.
         */
        outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */

        /* ICW2: 8259A-1 IR0-7 mapped to ISA_IRQ_VECTOR(0) */
        outb_pic(ISA_IRQ_VECTOR(0), PIC_MASTER_IMR);

        /* 8259A-1 (the master) has a slave on IR2 */
        outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);

        if (auto_eoi)   /* master does Auto EOI */
                outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
        else            /* master expects normal EOI */
                outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

        outb_pic(0x11, PIC_SLAVE_CMD);  /* ICW1: select 8259A-2 init */

        /* ICW2: 8259A-2 IR0-7 mapped to ISA_IRQ_VECTOR(8) */
        outb_pic(ISA_IRQ_VECTOR(8), PIC_SLAVE_IMR);
        /* 8259A-2 is a slave on master's IR2 */
        outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
        /* (slave's support for AEOI in flat mode is to be investigated) */
        outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);

        if (auto_eoi)
                /*
                 * In AEOI mode we just have to mask the interrupt
                 * when acking.
                 */
                i8259A_chip.irq_mask_ack = disable_8259A_irq;
        else
                i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

        udelay(100);            /* wait for 8259A to initialize */

        outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
        outb(cached_slave_mask, PIC_SLAVE_IMR);   /* restore slave IRQ mask */

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
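/*
 * Background note (illustrative, not part of the original file): the
 * writes above follow the standard 8259A initialization sequence - ICW1
 * (0x11: edge-triggered, cascade mode, ICW4 needed) to the command port,
 * then ICW2 (vector base), ICW3 (cascade wiring) and ICW4 (8086 mode,
 * optionally auto-EOI) to the data port of each PIC.
 */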

/*
 * Make the i8259 a driver so that we can select PIC functions at run time.
 * The goal is to make x86 binary compatible among PC compatible and
 * non-PC compatible platforms, such as x86 MID.
 */

static void legacy_pic_noop(void) { };
static void legacy_pic_uint_noop(unsigned int unused) { };
static void legacy_pic_int_noop(int unused) { };
static int legacy_pic_irq_pending_noop(unsigned int irq)
{
        return 0;
}
static int legacy_pic_probe(void)
{
        return 0;
}

struct legacy_pic null_legacy_pic = {
        .nr_legacy_irqs = 0,
        .chip = &dummy_irq_chip,
        .mask = legacy_pic_uint_noop,
        .unmask = legacy_pic_uint_noop,
        .mask_all = legacy_pic_noop,
        .restore_mask = legacy_pic_noop,
        .init = legacy_pic_int_noop,
        .probe = legacy_pic_probe,
        .irq_pending = legacy_pic_irq_pending_noop,
        .make_irq = legacy_pic_uint_noop,
};

struct legacy_pic default_legacy_pic = {
        .nr_legacy_irqs = NR_IRQS_LEGACY,
        .chip = &i8259A_chip,
        .mask = mask_8259A_irq,
        .unmask = unmask_8259A_irq,
        .mask_all = mask_8259A,
        .restore_mask = unmask_8259A,
        .init = init_8259A,
        .probe = probe_8259A,
        .irq_pending = i8259A_irq_pending,
        .make_irq = make_8259A_irq,
};

struct legacy_pic *legacy_pic = &default_legacy_pic;
EXPORT_SYMBOL(legacy_pic);
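/*
 * Usage sketch (illustrative, not part of the original file): the rest of
 * the x86 code is expected to go through the legacy_pic pointer rather
 * than calling the 8259A routines directly, e.g. roughly:
 *
 *        if (nr_legacy_irqs())           // helper from <asm/i8259.h>
 *                legacy_pic->init(0);    // full init, normal EOI
 *        legacy_pic->mask_all();         // mask every PIC input
 *        legacy_pic->restore_mask();     // restore the cached masks
 *
 * On platforms without a PIC, legacy_pic points at null_legacy_pic and all
 * of these calls become no-ops.
 */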

static int __init i8259A_init_ops(void)
{
        if (legacy_pic == &default_legacy_pic)
                register_syscore_ops(&i8259_syscore_ops);

        return 0;
}
device_initcall(i8259A_init_ops);

void __init legacy_pic_pcat_compat(void)
{
        pcat_compat = true;
}