Merge branch 'sh/ioremap-fixed'
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index c23e672..f059ed6 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -56,6 +56,7 @@
 	}
 
 	if (!tsk_used_math(tsk)) {
+		local_irq_enable();
 		/*
 		 * does a slab alloc which can sleep
 		 */
@@ -66,6 +67,7 @@
 			do_group_exit(SIGKILL);
 			return;
 		}
+		local_irq_disable();
 	}
 
 	grab_fpu(regs);
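
The fpu.c change above follows from the existing comment: the math-state allocation goes through the slab allocator and can sleep, which is not allowed while the exception path still has interrupts disabled, so IRQs are enabled just around that allocation and disabled again afterwards. A minimal sketch of the pattern, with hypothetical names (example_lazy_alloc() and struct example_state are not part of the patch):

#include <linux/slab.h>
#include <linux/irqflags.h>

/* Hypothetical state blob, standing in for the task's math state. */
struct example_state {
	unsigned long regs[64];
};

/* Hypothetical helper, called from a trap path that runs with IRQs disabled. */
static struct example_state *example_lazy_alloc(void)
{
	struct example_state *st;

	local_irq_enable();			/* GFP_KERNEL allocation below may sleep */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	local_irq_disable();			/* restore the IRQs-off state the caller expects */

	return st;
}
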
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index e5d421db..8ee31a0 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
  *  arch/sh/kernel/head.S
  *
  *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ *  Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -84,6 +85,236 @@
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif
 
+#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+	/*
+	 * Reconfigure the initial PMB mappings set up by the hardware.
+	 *
+	 * When we boot in 32-bit MMU mode there are two PMB entries already
+	 * set up for us.
+	 *
+	 * Entry       VPN	   PPN	    V	SZ	C	UB	WT
+	 * ---------------------------------------------------------------
+	 *   0	    0x80000000 0x00000000   1  512MB	1	0	1
+	 *   1	    0xA0000000 0x00000000   1  512MB	0	0	0
+	 *
+	 * But we reprogram them here because we want complete control over
+	 * our address space and the initial mappings may not map PAGE_OFFSET
+	 * to __MEMORY_START (or even map all of our RAM).
+	 *
+	 * Once we've set up cached and uncached mappings for all of RAM we
+	 * clear the rest of the PMB entries.
+	 *
+	 * This clearing also deals with the fact that PMB entries can persist
+	 * across reboots. The PMB could have been left in any state when the
+	 * reboot occurred, so to be safe we clear all entries and start
+	 * with a clean slate.
+	 */
+
+	mov.l	.LMMUCR, r1	/* Flush the TLB */
+	mov.l	@r1, r0
+	or	#MMUCR_TI, r0
+	mov.l	r0, @r1
+
+	mov.l	.LMEMORY_SIZE, r5
+	mov	r5, r7
+
+	mov	#PMB_E_SHIFT, r0
+	mov	#0x1, r4
+	shld	r0, r4
+
+	mov.l	.LFIRST_DATA_ENTRY, r0
+	mov.l	.LPMB_DATA, r1
+	mov.l	.LFIRST_ADDR_ENTRY, r2
+	mov.l	.LPMB_ADDR, r3
+
+	mov	#0, r10
+
+	/*
+	 * r0 = PMB_DATA data field
+	 * r1 = PMB_DATA address field
+	 * r2 = PMB_ADDR data field
+	 * r3 = PMB_ADDR address field
+	 * r4 = PMB_E_SHIFT
+	 * r5 = remaining amount of RAM to map
+	 * r6 = PMB mapping size we're trying to use
+	 * r7 = cached_to_uncached
+	 * r8 = scratch register
+	 * r9 = scratch register
+	 * r10 = number of PMB entries we've set up
+	 */
+.L512:
+	mov	#(512 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.L128
+
+	mov	#(PMB_SZ_512M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L512
+	 nop
+
+.L128:
+	mov	#(128 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.L64
+
+	mov	#(PMB_SZ_128M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L128
+	 nop
+
+.L64:
+	mov	#(64 >> 4), r6
+	shll16	r6
+	shll8	r6
+
+	cmp/hi	r5, r6
+	bt	.Ldone
+
+	mov	#(PMB_SZ_64M >> 2), r9
+	shll2	r9
+
+	/*
+	 * Cached mapping
+	 */
+	mov	#PMB_C, r8
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov.l	r2, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	/*
+	 * Uncached mapping
+	 */
+	mov	#(PMB_UB >> 8), r8
+	shll8	r8
+
+	or	r0, r8
+	or	r9, r8
+	mov.l	r8, @r1
+	mov	r2, r8
+	add	r7, r8
+	mov.l	r8, @r3
+
+	add	r4, r1		/* Increment to the next PMB_DATA entry */
+	add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	add	#1, r10		/* Increment number of PMB entries */
+
+	sub	r6, r5
+	add	r6, r0
+	add	r6, r2
+
+	bra	.L64
+	 nop
+
+.Ldone:
+	/* Update cached_to_uncached */
+	mov.l	.Lcached_to_uncached, r0
+	mov.l	r7, @r0
+
+	/*
+	 * Clear the remaining PMB entries.
+	 *
+	 * r3 = entry to begin clearing from
+	 * r10 = number of entries we've set up so far
+	 */
+	mov	#0, r1
+	mov	#PMB_ENTRY_MAX, r0
+
+.Lagain:
+	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
+	add	#1, r10		/* Increment the loop counter */
+	cmp/eq	r0, r10
+	bf/s	.Lagain
+	 add	r4, r3		/* Increment to the next PMB_ADDR entry */
+
+	mov.l	6f, r0
+	icbi	@r0
+
+#endif /* CONFIG_PMB && !CONFIG_PMB_LEGACY */
+
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
 	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -133,3 +364,13 @@
 5:	.long	start_kernel
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
+
+#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
+.LPMB_ADDR:		.long	PMB_ADDR
+.LPMB_DATA:		.long	PMB_DATA
+.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
+.LMMUCR:		.long	MMUCR
+.Lcached_to_uncached:	.long	cached_to_uncached
+.LMEMORY_SIZE:		.long	__MEMORY_SIZE
+#endif
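
To make the head_32.S loop above easier to follow, here is a rough C rendering of what the assembly does. It is illustrative only: it assumes the same PMB_* macros and the mk_pmb_addr()/mk_pmb_data() helpers from arch/sh/mm/pmb.c, and the real code of course has to run from assembly this early in boot. For each candidate block size (512MB, then 128MB, then 64MB) it programs a cached/uncached pair of PMB entries per block until the remaining RAM is smaller than that block size, then invalidates the leftover slots. Note how the size constants are built in the assembly: 512MB, for instance, is loaded as (512 >> 4) = 32 and shifted left 24 bits via shll16 + shll8, giving 32 << 24 = 0x20000000.

/*
 * Illustrative C sketch of the PMB setup loop in head_32.S above.
 * Not part of the patch; the register assignments from the comment
 * block are noted alongside the corresponding variables.
 */
static void __init pmb_early_setup_sketch(void)
{
	static const unsigned long blk_size[] = { 512 << 20, 128 << 20, 64 << 20 };
	static const unsigned long blk_flag[] = { PMB_SZ_512M, PMB_SZ_128M, PMB_SZ_64M };
	unsigned long data = __MEMORY_START | PMB_V;	/* r0: PPN side */
	unsigned long addr = PAGE_OFFSET | PMB_V;	/* r2: VPN side */
	unsigned long left = __MEMORY_SIZE;		/* r5: RAM left to map */
	unsigned long c2u  = __MEMORY_SIZE;		/* r7: cached_to_uncached offset */
	unsigned int entry = 0;				/* r10: entries programmed */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(blk_size); i++) {
		while (left >= blk_size[i]) {
			/* cached mapping */
			__raw_writel(data | blk_flag[i] | PMB_C, mk_pmb_data(entry));
			__raw_writel(addr, mk_pmb_addr(entry));
			entry++;

			/* matching uncached mapping (PMB_UB set, no PMB_C) */
			__raw_writel(data | blk_flag[i] | PMB_UB, mk_pmb_data(entry));
			__raw_writel(addr + c2u, mk_pmb_addr(entry));
			entry++;

			left -= blk_size[i];
			data += blk_size[i];
			addr += blk_size[i];
		}
	}

	/* clearing PMB_V in PMB_ADDR is enough to invalidate the leftovers */
	for (; entry < PMB_ENTRY_MAX; entry++)
		__raw_writel(0, mk_pmb_addr(entry));
}
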
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 8f7dbf1..b796b6c 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,11 +3,8 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005 - 2010 Paul Mundt
- *
- * P1/P2 Section mapping definitions from map32.h, which was:
- *
- *	Copyright 2003 (c) Lineo Solutions,Inc.
+ * Copyright (C) 2005 - 2010  Paul Mundt
+ * Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -280,12 +277,119 @@
 }
 
 #ifdef CONFIG_PMB_LEGACY
+static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
+}
+
 static int pmb_apply_legacy_mappings(void)
 {
+	unsigned int applied = 0;
+	int i;
+
+	pr_info("PMB: Preserving legacy mappings:\n");
+
+	/*
+	 * The following entries are setup by the bootloader.
+	 *
+	 * Entry       VPN	   PPN	    V	SZ	C	UB
+	 * --------------------------------------------------------
+	 *   0      0xA0000000 0x00000000   1   64MB    0       0
+	 *   1      0xA4000000 0x04000000   1   16MB    0       0
+	 *   2      0xA6000000 0x08000000   1   16MB    0       0
+	 *   9      0x88000000 0x48000000   1  128MB    1       1
+	 *  10      0x90000000 0x50000000   1  128MB    1       1
+	 *  11      0x98000000 0x58000000   1  128MB    1       1
+	 *  13      0xA8000000 0x48000000   1  128MB    0       0
+	 *  14      0xB0000000 0x50000000   1  128MB    0       0
+	 *  15      0xB8000000 0x58000000   1  128MB    0       0
+	 *
+	 * The only entries we need are the ones that map the kernel
+	 * at the cached and uncached addresses.
+	 */
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		unsigned long addr, data;
+		unsigned long addr_val, data_val;
+		unsigned long ppn, vpn;
+
+		addr = mk_pmb_addr(i);
+		data = mk_pmb_data(i);
+
+		addr_val = __raw_readl(addr);
+		data_val = __raw_readl(data);
+
+		/*
+		 * Skip over any bogus entries
+		 */
+		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
+			continue;
+
+		ppn = data_val & PMB_PFN_MASK;
+		vpn = addr_val & PMB_PFN_MASK;
+
+		/*
+		 * Only preserve in-range mappings.
+		 */
+		if (pmb_ppn_in_range(ppn)) {
+			unsigned int size;
+			char *sz_str = NULL;
+
+			size = data_val & PMB_SZ_MASK;
+
+			sz_str = (size == PMB_SZ_16M)  ? " 16MB":
+				 (size == PMB_SZ_64M)  ? " 64MB":
+				 (size == PMB_SZ_128M) ? "128MB":
+							 "512MB";
+
+			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
+				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
+				(data_val & PMB_C) ? "" : "un");
+
+			applied++;
+		} else {
+			/*
+			 * Invalidate anything out of bounds.
+			 */
+			__raw_writel(addr_val & ~PMB_V, addr);
+			__raw_writel(data_val & ~PMB_V, data);
+		}
+	}
+
+	return (applied == 0);
+}
+#else
+static inline int pmb_apply_legacy_mappings(void)
+{
+	return 1;
+}
+#endif
+
+int __uses_jump_to_uncached pmb_init(void)
+{
 	int i;
 	unsigned long addr, data;
-	unsigned int applied = 0;
+	unsigned long ret;
 
+	jump_to_uncached();
+
+	/*
+	 * Attempt to apply the legacy boot mappings if configured. If
+	 * this is successful then we simply carry on with those and
+	 * don't bother establishing additional memory mappings. Dynamic
+	 * device mappings through pmb_remap() can still be bolted on
+	 * after this.
+	 */
+	ret = pmb_apply_legacy_mappings();
+	if (ret == 0) {
+		back_to_cached();
+		return 0;
+	}
+
+	/*
+	 * Sync our software copy of the PMB mappings with those in
+	 * hardware. The mappings in the hardware PMB were either set up
+	 * by the bootloader or very early on by the kernel.
+	 */
 	for (i = 0; i < PMB_ENTRY_MAX; i++) {
 		struct pmb_entry *pmbe;
 		unsigned long vpn, ppn, flags;
@@ -318,60 +422,10 @@
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		WARN_ON(IS_ERR(pmbe));
-
-		applied++;
 	}
 
-	return (applied == 0);
-}
-#else
-static inline int pmb_apply_legacy_mappings(void)
-{
-	return 1;
-}
-#endif
-
-int __uses_jump_to_uncached pmb_init(void)
-{
-	unsigned int i;
-	unsigned long size, ret;
-
-	jump_to_uncached();
-
-	/*
-	 * Attempt to apply the legacy boot mappings if configured. If
-	 * this is successful then we simply carry on with those and
-	 * don't bother establishing additional memory mappings. Dynamic
-	 * device mappings through pmb_remap() can still be bolted on
-	 * after this.
-	 */
-	ret = pmb_apply_legacy_mappings();
-	if (ret == 0) {
-		back_to_cached();
-		return 0;
-	}
-
-	/*
-	 * Insert PMB entries for the P1 and P2 areas so that, after
-	 * we've switched the MMU to 32-bit mode, the semantics of P1
-	 * and P2 are the same as in 29-bit mode, e.g.
-	 *
-	 *	P1 - provides a cached window onto physical memory
-	 *	P2 - provides an uncached window onto physical memory
-	 */
-	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
-
-	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
-	BUG_ON(ret != size);
-
-	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
-	BUG_ON(ret != size);
-
 	ctrl_outl(0, PMB_IRMCR);
 
-	/* PMB.SE and UB[7] */
-	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
 	/* Flush out the TLB */
 	i =  ctrl_inl(MMUCR);
 	i |= MMUCR_TI;