/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>

static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */

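/*
 * Ask the PROM, via a SAL call, to allocate an interrupt on the bridge
 * identified by local_nasid/local_widget and target it at req_nasid/req_slice.
 * Returns the SAL status word (0 on success).
 */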
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info,
		  int req_irq, nasid_t req_nasid,
		  int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);

	return ret_stuff.status;
}

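/* Release an interrupt previously allocated from the PROM via sn_intr_alloc() */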
void sn_intr_free(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}

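/*
 * Ask the PROM to redirect an already allocated interrupt to a new
 * nasid/slice.  A non-zero return means the PROM does not support
 * SAL_INTR_REDIRECT or the call failed.
 */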
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
		     struct sn_irq_info *sn_irq_info,
		     nasid_t req_nasid, int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info),
			(u64) req_nasid, (u64) req_slice, 0);

	return ret_stuff.status;
}

static unsigned int sn_startup_irq(struct irq_data *data)
{
	return 0;
}

static void sn_shutdown_irq(struct irq_data *data)
{
}

extern void ia64_mca_register_cpev(int);

static void sn_disable_irq(struct irq_data *data)
{
	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(0);
}

static void sn_enable_irq(struct irq_data *data)
{
	if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(data->irq);
}

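/*
 * Acknowledge an interrupt: clear the asserted bits in the hub's
 * event-occurred register by writing them back through its alias register,
 * and record the vector in this CPU's in-service bitmap.
 */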
static void sn_ack_irq(struct irq_data *data)
{
	u64 event_occurred, mask;
	unsigned int irq = data->irq & 0xff;

	event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	irq_move_irq(data);
}

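/*
 * Move a device interrupt to a new nasid/slice.  SAL_INTR_REDIRECT is used
 * when the PROM supports it; otherwise the old PROM allocation is freed, a
 * new one is allocated, and the entry on the sn_irq_lh list is replaced.
 * Returns the (possibly new) sn_irq_info, or NULL on failure.
 */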
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
				       nasid_t nasid, int slice)
{
	int vector;
	int cpuid;
#ifdef CONFIG_SMP
	int cpuphys;
#endif
	int64_t bridge;
	int local_widget, status;
	nasid_t local_nasid;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *pci_provider;

	bridge = (u64) sn_irq_info->irq_bridge;
	if (!bridge) {
		return NULL; /* irq is not a device interrupt */
	}

	local_nasid = NASID_GET(bridge);

	if (local_nasid & 1)
		local_widget = TIO_SWIN_WIDGETNUM(bridge);
	else
		local_widget = SWIN_WIDGETNUM(bridge);
	vector = sn_irq_info->irq_irq;

	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
	if (!status) {
		new_irq_info = sn_irq_info;
		goto finish_up;
	}

	/*
	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
	 * Revert to old method.
	 */
	new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
			       GFP_ATOMIC);
	if (new_irq_info == NULL)
		return NULL;

	/* Free the old PROM new_irq_info structure */
	sn_intr_free(local_nasid, local_widget, new_irq_info);
	unregister_intr_pda(new_irq_info);

	/* allocate a new PROM new_irq_info struct */
	status = sn_intr_alloc(local_nasid, local_widget,
			       new_irq_info, vector,
			       nasid, slice);

	/* SAL call failed */
	if (status) {
		kfree(new_irq_info);
		return NULL;
	}

	register_intr_pda(new_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	kfree_rcu(sn_irq_info, rcu);

finish_up:
	/* Update the kernel's new_irq_info with the new target info */
	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
				     new_irq_info->irq_slice);
	new_irq_info->irq_cpuid = cpuid;

	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

	/*
	 * If this represents a line interrupt, target it.  If it's
	 * an msi (irq_int_bit < 0), it's already targeted.
	 */
	if (new_irq_info->irq_int_bit >= 0 &&
	    pci_provider && pci_provider->target_interrupt)
		(pci_provider->target_interrupt)(new_irq_info);

#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpuid);
	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

	return new_irq_info;
}

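/*
 * irq_chip ->irq_set_affinity callback: retarget every sn_irq_info
 * registered for this irq to the first online CPU in the requested mask.
 */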
static int sn_set_affinity_irq(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	unsigned int irq = data->irq;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
	slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}

#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
	/*
	 * On systems which support CPU disabling (SHub2), all error interrupts
	 * are targeted at the boot CPU.
	 */
	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif

static void
sn_mask_irq(struct irq_data *data)
{
}

static void
sn_unmask_irq(struct irq_data *data)
{
}

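/* irq_chip describing the SN hub interrupt controller */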
struct irq_chip irq_type_sn = {
	.name			= "SN hub",
	.irq_startup		= sn_startup_irq,
	.irq_shutdown		= sn_shutdown_irq,
	.irq_enable		= sn_enable_irq,
	.irq_disable		= sn_disable_irq,
	.irq_ack		= sn_ack_irq,
	.irq_mask		= sn_mask_irq,
	.irq_unmask		= sn_unmask_irq,
	.irq_set_affinity	= sn_set_affinity_irq
};

ia64_vector sn_irq_to_vector(int irq)
{
	if (irq >= IA64_NUM_VECTORS)
		return 0;
	return (ia64_vector)irq;
}

unsigned int sn_local_vector_to_irq(u8 vector)
{
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

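/*
 * Set the SN2 device vector range and install irq_type_sn as the chip for
 * every irq that does not already have one.
 */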
void sn_irq_init(void)
{
	int i;

	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

	for (i = 0; i < NR_IRQS; i++) {
		if (irq_get_chip(i) == &no_irq_chip)
			irq_set_chip(i, &irq_type_sn);
	}
}

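/* Track the lowest and highest registered irq numbers in the per-CPU PDA */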
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;

	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	}

	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
		pdacpu(cpu)->sn_first_irq = irq;
}

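/*
 * Recompute the per-CPU first/last irq bounds when an irq that defined
 * either boundary is unregistered.
 */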
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	rcu_read_lock();
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1;
		     i && !foundmatch; i--) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}

	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1;
		     i < NR_IRQS && !foundmatch; i++) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
	rcu_read_unlock();
}

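/*
 * Bind a PROM-provided sn_irq_info to a PCI device: record the target CPU,
 * link it onto the per-irq list, and preserve the affinity chosen by the PROM.
 */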
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
	int cpuphys;
#endif

	pci_dev_get(pci_dev);
	sn_irq_info->irq_cpuid = cpu;
	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

	/* link it into the sn_irq[irq] list */
	spin_lock(&sn_irq_info_lock);
	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
	reserve_irq_vector(sn_irq_info->irq_irq);
	if (sn_irq_info->irq_int_bit != -1)
		irq_set_handler(sn_irq_info->irq_irq, handle_level_irq);
	spin_unlock(&sn_irq_info_lock);

	register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpu);
	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
	/*
	 * Affinity was set by the PROM, prevent it from
	 * being reset by the request_irq() path.
	 */
	irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
#endif
}

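/* Undo sn_irq_fixup() when a PCI device goes away */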
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
	struct sn_irq_info *sn_irq_info;

	/* Only clean up IRQ state if this device has a host bus context */
	if (!SN_PCIDEV_BUSSOFT(pci_dev))
		return;

	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
	if (!sn_irq_info)
		return;
	if (!sn_irq_info->irq_irq) {
		kfree(sn_irq_info);
		return;
	}

	unregister_intr_pda(sn_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_del_rcu(&sn_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
		free_irq_vector(sn_irq_info->irq_irq);
	kfree_rcu(sn_irq_info, rcu);
	pci_dev_put(pci_dev);
}

static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
	struct sn_pcibus_provider *pci_provider;

	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];

	/* Don't force an interrupt if the irq has been disabled */
	if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
	    pci_provider && pci_provider->force_interrupt)
		(*pci_provider->force_interrupt)(sn_irq_info);
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	u64 regval;
	struct pcidev_info *pcidev_info;
	struct pcibus_info *pcibus_info;

	/*
	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
	 * since they do not target Shub II interrupt registers.  If that
	 * ever changes, this check will need to be updated.
	 */
	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
	    pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);

	if (!ia64_get_irr(irq_to_vector(irq))) {
		if (!test_bit(irq, pda->sn_in_service_ivecs)) {
			regval &= 0xff;
			if (sn_irq_info->irq_int_bit & regval &
			    sn_irq_info->irq_last_intr) {
				regval &= ~(sn_irq_info->irq_int_bit & regval);
				sn_call_force_intr_provider(sn_irq_info);
			}
		}
	}
	sn_irq_info->irq_last_intr = regval;
}

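/*
 * Scan every interrupt registered on this CPU and force any that appear to
 * have been lost (see sn_check_intr() above).
 */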
void sn_lb_int_war_check(void)
{
	struct sn_irq_info *sn_irq_info;
	int i;

	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;

	rcu_read_lock();
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
			sn_check_intr(i, sn_irq_info);
		}
	}
	rcu_read_unlock();
}

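/* Allocate and initialize the per-irq sn_irq_lh list heads */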
void __init sn_irq_lh_init(void)
{
	int i;

	sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
	if (!sn_irq_lh)
		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

	for (i = 0; i < NR_IRQS; i++) {
		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!sn_irq_lh[i])
			panic("SN PCI INIT: Failed IRQ memory allocation\n");

		INIT_LIST_HEAD(sn_irq_lh[i]);
	}
}