/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 * Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;

static struct pmb_entry pmb_init_map[] = {
	/* vpn		ppn		flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};
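
/*
 * The boot mappings above cover the first 448MB of physical space twice:
 * once through P1 (0x80000000-0x9bffffff) cached, and once through P2
 * (0xa0000000-0xbbffffff) unbuffered/write-through. The 128MB entries at
 * 0x88000000/0xa8000000 each stand in for what would otherwise cost two
 * 64MB slots, leaving more of the 16 entries free for dynamic use.
 */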

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
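
/*
 * Worked example, assuming the usual definitions from <asm/mmu.h>
 * (PMB_E_SHIFT = 8, PMB_E_MASK = 0xf): entry 2 yields offset 0x200, so
 * its slots live at PMB_ADDR + 0x200 and PMB_DATA + 0x200, i.e. each
 * entry occupies a 0x100-byte stride in both memory-mapped arrays.
 */
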
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

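/*
 * All allocated entries are chained on pmb_list so that mappings can be
 * looked up by vpn in pmb_unmap() and replayed into hardware after a
 * resume from hibernation (see the CONFIG_PM code at the bottom of this
 * file). The helpers below do the (un)linking; callers hold
 * pmb_list_lock.
 */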
static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}

struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;

	/*
	 * The slab constructor only runs when a page is first grabbed,
	 * so a recycled object can carry a stale ->link from a previous
	 * mapping chain; clear it explicitly.
	 */
	pmbe->link = NULL;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}
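
/*
 * Slot allocation in __set_pmb_entry() goes through the pmb_map bitmap:
 * find_first_zero_bit() proposes a free slot and test_and_set_bit()
 * claims it atomically, retrying if another CPU races in. With the
 * twelve boot entries from pmb_init_map wired into slots 0-11, the
 * first dynamic mapping normally lands in slot 12.
 */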

int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}

void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to a
	 * reset by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	clear_bit(entry, &pmb_map);
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
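
/*
 * pmb_remap() walks this table largest-first, greedily decomposing a
 * request into hardware section sizes. A 192MB (0x0c000000) region, for
 * example, becomes one 128MB entry plus one 64MB entry, with the
 * resulting pmb_entries linked together for tear-down.
 */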

long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long orig_addr, wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	orig_addr = vaddr;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe)) {
			/* Unwind any entries mapped so far */
			if (pmbp)
				pmb_unmap(orig_addr);
			return PTR_ERR(pmbe);
		}

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			/* Unwind any entries mapped so far */
			if (pmbp)
				pmb_unmap(orig_addr);
			return -EBUSY;
		}

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
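
/*
 * Usage sketch (hypothetical addresses): map 64MB of physical space at
 * 0x48000000 into a fixed virtual window at 0xbc000000, cached:
 *
 *	long mapped = pmb_remap(0xbc000000, 0x48000000,
 *				0x04000000, _PAGE_CACHABLE);
 *	if (mapped < 0)
 *		return mapped;
 *	...
 *	pmb_unmap(0xbc000000);
 *
 * The return value is the number of bytes actually mapped, which can
 * fall short of the request if the remainder is smaller than the
 * smallest PMB section size (16MB). pmb_unmap() below takes the first
 * virtual address and tears down the whole linked chain.
 */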

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

static void pmb_cache_ctor(void *pmb)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}

static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important: P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
arch_initcall(pmb_init);

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		spin_lock_irq(&pmb_list_lock);
		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
			set_pmb_entry(pmbe);
		spin_unlock_irq(&pmb_list_lock);
	}
	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif