blob: f04dbb3069b8fda5168e7a95dc935baf4389f127 [file] [log] [blame]
Andi Kleena32073b2006-06-26 13:56:40 +02001/*
2 * Shared support code for AMD K8 northbridges and derivates.
3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
4 */
Joe Perchesc767a542012-05-21 19:50:07 -07005
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
Andi Kleena32073b2006-06-26 13:56:40 +02008#include <linux/types.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09009#include <linux/slab.h>
Andi Kleena32073b2006-06-26 13:56:40 +020010#include <linux/init.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/spinlock.h>
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +020014#include <asm/amd_nb.h>
Andi Kleena32073b2006-06-26 13:56:40 +020015
/* Per-northbridge cached GART flush words (misc device, reg 0x9c). */
static u32 *flush_words;

/* PCI IDs of the "misc" (function 3) device of each supported NB family. */
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
Andi Kleena32073b2006-06-26 13:56:40 +020029
/* PCI IDs of the "link" (function 4) device of each supported NB family. */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};
37
/*
 * Bus/device ranges probed for northbridge devices:
 * { bus, first device slot, device slot limit (exclusive) }.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
44
/* Global northbridge bookkeeping: count, feature flags and device array. */
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
Andi Kleena32073b2006-06-26 13:56:40 +020047
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020048static struct pci_dev *next_northbridge(struct pci_dev *dev,
Jan Beulich691269f2011-02-09 08:26:53 +000049 const struct pci_device_id *ids)
Andi Kleena32073b2006-06-26 13:56:40 +020050{
51 do {
52 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
53 if (!dev)
54 break;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020055 } while (!pci_match_id(ids, dev));
Andi Kleena32073b2006-06-26 13:56:40 +020056 return dev;
57}
58
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020059int amd_cache_northbridges(void)
Andi Kleena32073b2006-06-26 13:56:40 +020060{
Borislav Petkov84fd1d32011-03-03 12:59:32 +010061 u16 i = 0;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020062 struct amd_northbridge *nb;
Hans Rosenfeld41b26102011-01-24 16:05:42 +010063 struct pci_dev *misc, *link;
Ben Collins3c6df2a2007-05-23 13:57:43 -070064
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020065 if (amd_nb_num())
Andi Kleena32073b2006-06-26 13:56:40 +020066 return 0;
67
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020068 misc = NULL;
69 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
70 i++;
71
72 if (i == 0)
73 return 0;
74
75 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
76 if (!nb)
77 return -ENOMEM;
78
79 amd_northbridges.nb = nb;
80 amd_northbridges.num = i;
81
Hans Rosenfeld41b26102011-01-24 16:05:42 +010082 link = misc = NULL;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020083 for (i = 0; i != amd_nb_num(); i++) {
84 node_to_amd_nb(i)->misc = misc =
85 next_northbridge(misc, amd_nb_misc_ids);
Hans Rosenfeld41b26102011-01-24 16:05:42 +010086 node_to_amd_nb(i)->link = link =
87 next_northbridge(link, amd_nb_link_ids);
Aravind Gopalakrishnan7d64ac62013-08-02 17:43:03 -050088 }
Andi Kleena32073b2006-06-26 13:56:40 +020089
Aravind Gopalakrishnan7d64ac62013-08-02 17:43:03 -050090 /* GART present only on Fam15h upto model 0fh */
Andreas Herrmann5c80cc72010-09-30 14:43:16 +020091 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
Aravind Gopalakrishnan7d64ac62013-08-02 17:43:03 -050092 (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020093 amd_northbridges.flags |= AMD_NB_GART;
Andreas Herrmann900f9ac2010-09-17 18:02:54 +020094
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +020095 /*
Aravind Gopalakrishnan7d64ac62013-08-02 17:43:03 -050096 * Check for L3 cache presence.
97 */
98 if (!cpuid_edx(0x80000006))
99 return 0;
100
101 /*
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +0200102 * Some CPU families support L3 Cache Index Disable. There are some
103 * limitations because of E382 and E388 on family 0x10.
104 */
105 if (boot_cpu_data.x86 == 0x10 &&
106 boot_cpu_data.x86_model >= 0x8 &&
107 (boot_cpu_data.x86_model > 0x9 ||
108 boot_cpu_data.x86_mask >= 0x1))
109 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
110
Hans Rosenfeldb453de02011-01-24 16:05:41 +0100111 if (boot_cpu_data.x86 == 0x15)
112 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
113
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100114 /* L3 cache partitioning is supported on family 0x15 */
115 if (boot_cpu_data.x86 == 0x15)
116 amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
117
Andi Kleena32073b2006-06-26 13:56:40 +0200118 return 0;
119}
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200120EXPORT_SYMBOL_GPL(amd_cache_northbridges);
Andi Kleena32073b2006-06-26 13:56:40 +0200121
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100122/*
123 * Ignores subdevice/subvendor but as far as I can figure out
124 * they're useless anyways
125 */
126bool __init early_is_amd_nb(u32 device)
Andi Kleena32073b2006-06-26 13:56:40 +0200127{
Jan Beulich691269f2011-02-09 08:26:53 +0000128 const struct pci_device_id *id;
Andi Kleena32073b2006-06-26 13:56:40 +0200129 u32 vendor = device & 0xffff;
Jan Beulich691269f2011-02-09 08:26:53 +0000130
Andi Kleena32073b2006-06-26 13:56:40 +0200131 device >>= 16;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200132 for (id = amd_nb_misc_ids; id->vendor; id++)
Andi Kleena32073b2006-06-26 13:56:40 +0200133 if (vendor == id->vendor && device == id->device)
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100134 return true;
135 return false;
Andi Kleena32073b2006-06-26 13:56:40 +0200136}
137
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700138struct resource *amd_get_mmconfig_range(struct resource *res)
139{
140 u32 address;
141 u64 base, msr;
142 unsigned segn_busn_bits;
143
144 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
145 return NULL;
146
147 /* assume all cpus from fam10h have mmconfig */
148 if (boot_cpu_data.x86 < 0x10)
149 return NULL;
150
151 address = MSR_FAM10H_MMIO_CONF_BASE;
152 rdmsrl(address, msr);
153
154 /* mmconfig is not enabled */
155 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
156 return NULL;
157
158 base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
159
160 segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
161 FAM10H_MMIO_CONF_BUSRANGE_MASK;
162
163 res->flags = IORESOURCE_MEM;
164 res->start = base;
165 res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
166 return res;
167}
168
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100169int amd_get_subcaches(int cpu)
170{
171 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
172 unsigned int mask;
Kevin Winchester141168c2011-12-20 20:52:22 -0400173 int cuid;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100174
175 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
176 return 0;
177
178 pci_read_config_dword(link, 0x1d4, &mask);
179
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100180 cuid = cpu_data(cpu).compute_unit_id;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100181 return (mask >> (4 * cuid)) & 0xf;
182}
183
/*
 * Assign the subcache bitmask @mask (low 4 bits, one bit per L3
 * subcache) to @cpu's compute unit via the L3 partitioning register
 * (link device, 0x1d4).
 *
 * On first use the reset state of the partitioning register and the
 * BAN bits of the misc device's 0x1b8 register are recorded, so that
 * BAN mode can be restored once all compute units return to their
 * reset assignment.
 *
 * Returns 0 on success, -EINVAL if L3 partitioning is unsupported or
 * @mask has bits set outside the low nibble.
 */
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;	/* only the BAN bits are of interest */
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	/*
	 * Place the new assignment in this compute unit's nibble;
	 * NOTE(review): bits 26+ appear to select which compute units the
	 * write applies to — confirm against the fam15h BKDG.
	 */
	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
223
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100224static int amd_cache_gart(void)
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200225{
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100226 u16 i;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200227
228 if (!amd_nb_has_feature(AMD_NB_GART))
229 return 0;
230
231 flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
232 if (!flush_words) {
233 amd_northbridges.flags &= ~AMD_NB_GART;
234 return -ENOMEM;
235 }
236
237 for (i = 0; i != amd_nb_num(); i++)
238 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
239 &flush_words[i]);
240
241 return 0;
242}
243
/*
 * Flush the GART TLB on all northbridges: set bit 0 of each node's
 * flush word register (0x9c), then spin until the hardware clears it.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	/* Kick off the flush on every node first ... */
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	/* ... then poll each node for completion. */
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
Andi Kleena32073b2006-06-26 13:56:40 +0200280
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200281static __init int init_amd_nbs(void)
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100282{
283 int err = 0;
284
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200285 err = amd_cache_northbridges();
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100286
287 if (err < 0)
Joe Perchesc767a542012-05-21 19:50:07 -0700288 pr_notice("Cannot enumerate AMD northbridges\n");
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100289
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200290 if (amd_cache_gart() < 0)
Joe Perchesc767a542012-05-21 19:50:07 -0700291 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200292
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100293 return err;
294}
295
296/* This has to go after the PCI subsystem */
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200297fs_initcall(init_amd_nbs);