Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Add a secondary TSB for hugepage mappings.
  [SPARC]: Respect vm_page_prot in io_remap_page_range().
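
The second change removes the pg_iobits override from the sparc io_remap
implementations: a mapping now gets whatever protections the caller left in
vma->vm_page_prot, so every driver touched below marks its MMIO mappings
uncacheable explicitly with pgprot_noncached() (added to sparc32 here as a
BTFIXUP call) before calling io_remap_pfn_range().  A minimal sketch of the
resulting driver pattern -- the function name is illustrative, not from this
patch:

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/* This is an I/O mapping; keep it away from core dumps
		 * and the swapper.
		 */
		vma->vm_flags |= VM_IO | VM_RESERVED;

		/* The caller, not io_remap_pfn_range(), now decides
		 * cacheability.
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					  size, vma->vm_page_prot) ? -EAGAIN : 0;
	}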
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 2cb0728..1ef7fa0 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -76,7 +76,6 @@
 	vma->vm_pgoff = (offset >> PAGE_SHIFT) |
 		((unsigned long)space << 28UL);
 
-	prot = __pgprot(pg_iobits);
 	offset -= from;
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index e9f9571..36b4d24 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -22,8 +22,6 @@
 struct ctx_list ctx_free;
 struct ctx_list ctx_used;
 
-unsigned int pg_iobits;
-
 extern void ld_mmu_sun4c(void);
 extern void ld_mmu_srmmu(void);
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c664b96..27b0e0b 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -2130,6 +2130,13 @@
 	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
 }
 
+static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
+{
+	prot &= ~__pgprot(SRMMU_CACHE);
+
+	return prot;
+}
+
 /* Load up routines and constants for sun4m and sun4d mmu */
 void __init ld_mmu_srmmu(void)
 {
@@ -2150,9 +2157,9 @@
 	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
 	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
 	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
-	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
 
 	/* Functions */
+	BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
 #ifndef CONFIG_SMP	
 	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
 #endif
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 731f196..49f28c1 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1589,7 +1589,10 @@
 
 static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
 {
-	unsigned long page_entry;
+	unsigned long page_entry, pg_iobits;
+
+	pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
+		    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
 
 	page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
 	page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
@@ -2134,6 +2137,13 @@
 	printk("SUN4C: %d mmu entries for the kernel\n", cnt);
 }
 
+static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
+{
+	prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);
+
+	return prot;
+}
+
 /* Load up routines and constants for sun4c mmu */
 void __init ld_mmu_sun4c(void)
 {
@@ -2156,10 +2166,9 @@
 	BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
 	BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
 	page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
-	pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
-		    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
 
 	/* Functions */
+	BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
 	
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c3685b3..267afdd 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -175,11 +175,11 @@
 	bool "4MB"
 
 config HUGETLB_PAGE_SIZE_512K
-	depends on !SPARC64_PAGE_SIZE_4MB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
 	bool "64K"
 
 endchoice
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 95ffa94..dfccff2 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -656,6 +656,7 @@
 	__pci_mmap_set_flags(dev, vma, mmap_state);
 	__pci_mmap_set_pgprot(dev, vma, mmap_state);
 
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ret = io_remap_pfn_range(vma, vma->vm_start,
 				 vma->vm_pgoff,
 				 vma->vm_end - vma->vm_start,
@@ -663,7 +664,6 @@
 	if (ret)
 		return ret;
 
-	vma->vm_flags |= VM_IO;
 	return 0;
 }
 
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb..b731881 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
 	 *
 	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
 	 * tsb_base = tsb_reg & ~0x7UL;
-	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_index = ((vaddr >> HASH_SHIFT) & index_mask);
 	 * tsb_ptr = tsb_base + (tsb_index * 16);
 	 */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2)	\
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1;			\
 	mov	512, TMP2;				\
 	andn	TSB_PTR, 0x7, TSB_PTR;			\
 	sllx	TMP2, TMP1, TMP2;			\
-	srlx	VADDR, PAGE_SHIFT, TMP1;		\
+	srlx	VADDR, HASH_SHIFT, TMP1;		\
 	sub	TMP2, 1, TMP2;				\
 	and	TMP1, TMP2, TMP1;			\
 	sllx	TMP1, 4, TMP1;				\
@@ -53,7 +53,7 @@
 
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@
 
 	/* fallthrough */
 
-	/* Create TSB pointer into %g1.  This is something like:
-	 *
-	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
-	 * tsb_base = tsb_reg & ~0x7UL;
-	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
-	 * tsb_ptr = tsb_base + (tsb_index * 16);
-	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
-	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
-	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
-	 * so we can preload it into %g7.
-	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
 
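For reference, COMPUTE_TSB_PTR's arithmetic with the new HASH_SHIFT
parameter, transcribed into plain C (a userspace sketch; the helper is
illustrative):

	#include <stdint.h>

	/* The low 3 bits of the TSB register encode the TSB size as
	 * (512 << n) entries; the remaining bits are the base address.
	 * Each TSB entry is 16 bytes: an 8-byte tag plus an 8-byte TTE.
	 */
	static uint64_t compute_tsb_ptr(uint64_t tsb_reg, uint64_t vaddr,
					unsigned int hash_shift)
	{
		uint64_t index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
		uint64_t tsb_base   = tsb_reg & ~0x7UL;
		uint64_t tsb_index  = (vaddr >> hash_shift) & index_mask;

		return tsb_base + tsb_index * 16;
	}

Passing PAGE_SHIFT reproduces the old behavior for the base TSB; passing
HPAGE_SHIFT lets the same macro index the hugepage TSB.
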
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0..df612e4 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea..a0c8ba5 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
+
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 --	TSB entry address
+	 * %g1 --	PAGE_SIZE TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
 	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		SCRATCHPAD_UTSBREG2, %g5
+	ldxa		[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp		%g5, -1
+	be,pt		%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov		%g6, %g2
+	and		%g5, 0x7, %g6
+	mov		512, %g7
+	andn		%g5, 0x7, %g5
+	sllx		%g7, %g6, %g7
+	srlx		%g4, HPAGE_SHIFT, %g6
+	sub		%g7, 1, %g7
+	and		%g6, %g7, %g6
+	sllx		%g6, 4, %g6
+	add		%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp		%g6, %g2
+	be,a,pt		%xcc, tsb_tlb_reload
+	 mov		%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx		%g4, 22, %g6
+80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 --	TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
-	 * %g5 --	physical address of PTE in Linux page tables
+	 * %g4 --	missing virtual address
+	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 * %g7 --	page table physical address
+	 *
+	 * We know that the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB both lack a matching entry.
+	 */
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+
+	/* Load and check PTE.  */
+	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and		%g5, %g7, %g2
+
+661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp		%g2, %g7
+	bne,pt		%xcc, 60f
+	 nop
+
+	/* It is a huge page, so use the huge page TSB entry address
+	 * we calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp		%g2, -1
+	movne		%xcc, %g2, %g1
+60:
+#endif
+
+	/* At this point we have:
+	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g5 --	valid PTE
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_reload:
 	TSB_LOCK_TAG(%g1, %g2, %g7)
-
-	/* Load and check PTE.  */
-	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
@@ -240,10 +335,9 @@
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB register value
-	 * %o2:	TSB virtual address
-	 * %o3:	TSB mapping locked PTE
-	 * %o4:	Hypervisor TSB descriptor physical address
+	 * %o1:	TSB base config pointer
+	 * %o2:	TSB huge config pointer, or NULL if none
+	 * %o3:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
 __tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch. */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
 
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
-
-	/* SUN4U TSB switch.  */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
-	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
-	membar	#Sync
-
-2:	brz	%o2, 9f
 	 nop
 
+	/* SUN4U TSB switch.  */
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
+	membar	#Sync
+	stxa	%o0, [%o5] ASI_IMMU
+	membar	#Sync
+
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
+
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
+	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
 9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
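
The sun4v_2insn_patch sections above select the sun4u or sun4v TTE
size-field encoding at boot; the test itself is just a mask and compare.
A sketch in C using the sun4u constants (the helper name is made up;
_PAGE_SZHUGE_4U resolves to the size bits of the configured hugepage
size, e.g. _PAGE_SZ4MB_4U):

	/* sun4u keeps the page size bits in the high end of the TTE
	 * (_PAGE_SZALL_4U); sun4v keeps them in the low 3 bits
	 * (_PAGE_SZALL_4V).  The comparison is identical either way.
	 */
	static inline int tte_is_huge_4u(unsigned long pte)
	{
		return (pte & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U;
	}

When the test succeeds and a hugepage TSB exists, the entry address saved
in TRAP_PER_CPU_TSB_HUGE_TEMP replaces the base TSB entry in %g1 before
the common tsb_reload path runs.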
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0..d21ff32 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
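
The base TSB is now sized against small-page RSS only: each huge PTE
inflates get_mm_rss() by HPAGE_SIZE / PAGE_SIZE base pages, so that
contribution is subtracted before the first grow check, while the hugepage
TSB is grown against huge_pte_count instead.  With the default 8K base
page and 4MB huge pages the scaling factor is:

	HPAGE_SIZE / PAGE_SIZE == (1UL << 22) / (1UL << 13) == 512
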
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 5fc5c57..8cb0620 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -140,7 +140,6 @@
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 	vma->vm_pgoff = phys_base >> PAGE_SHIFT;
 
-	prot = __pgprot(pg_iobits);
 	offset -= from;
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 280dc79..074620d 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -290,6 +291,15 @@
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2ae143b..ded63ee 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@
 
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
 
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2..beaa028 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
 
@@ -36,7 +36,8 @@
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
+	unsigned long i;
 
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
-
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
-	
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
 
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
+			break;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
+			break;
+#endif
 		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
-			break;
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
+			break;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
+			break;
+#endif
 		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
-			break;
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try to grow it.
  *
  * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
  * will not trigger any longer.
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@
  * the number of entries that the current TSB can hold at once.  Currently,
  * we trigger when the RSS hits 3/4 of the TSB capacity.
  */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && new_cache_index > 0) {
+		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+		    new_cache_index > 0) {
 			new_cache_index = 0;
 			new_size = 8192;
 			new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@
 		/* If we failed on a TSB grow, we are under serious
 		 * memory pressure so don't try to grow any more.
 		 */
-		if (mm->context.tsb != NULL)
-			mm->context.tsb_rss_limit = ~0UL;
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 		return;
 	}
 
@@ -339,23 +371,26 @@
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@
 		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 
@@ -394,40 +429,65 @@
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counter as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
+static void tsb_destroy_one(struct tsb_config *tp)
+{
+	unsigned long cache_index;
+
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
+
 void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags, cache_index;
+	unsigned long flags, i;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
-
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 0291cd6..ffd0800 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -619,6 +619,7 @@
 #endif
 		offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 6bdd768..2beb3dd 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -71,9 +71,8 @@
 	if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
 		size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
 
-	pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
-	pgprot_val(vma->vm_page_prot) |= _PAGE_E;
 	vma->vm_flags |= (VM_SHM | VM_LOCKED);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
 		return -EAGAIN;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 996c7b5..07d882b 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1169,11 +1169,6 @@
 	vma->vm_pgoff = off >> PAGE_SHIFT;
 	/* This is an IO map - tell maydump to skip this VMA */
 	vma->vm_flags |= VM_IO | VM_RESERVED;
-#if defined(__sparc_v9__)
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
-#else
 #if defined(__mc68000__)
 #if defined(CONFIG_SUN3)
 	pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
@@ -1195,7 +1190,7 @@
 #elif defined(__i386__) || defined(__x86_64__)
 	if (boot_cpu_data.x86 > 3)
 		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-#elif defined(__mips__)
+#elif defined(__mips__) || defined(__sparc_v9__)
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #elif defined(__hppa__)
 	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -1212,7 +1207,6 @@
 	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
 			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
 		return -EAGAIN;
-#endif /* !__sparc_v9__ */
 	return 0;
 #endif /* !sparc32 */
 }
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index a4d7cc5..34ef859 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -58,6 +58,8 @@
 	/* To stop the swapper from even considering these pages */
 	vma->vm_flags |= (VM_IO | VM_RESERVED);
 	
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
 	/* Each page, see which map applies */
 	for (page = 0; page < size; ){
 		map_size = 0;
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index b33c354..9eea8f4 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -269,11 +269,14 @@
 
 BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
 BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
+BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
 
 #define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
 #define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
 #define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
 
+#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
+
 BTFIXUPDEF_INT(pte_modify_mask)
 
 static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
@@ -309,9 +312,6 @@
 #define pte_unmap(pte)		do{}while(0)
 #define pte_unmap_nested(pte)	do{}while(0)
 
-/* The permissions for pgprot_val to make a page mapped on the obio space */
-extern unsigned int pg_iobits;
-
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index c66a81b..9d6a6db 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -71,7 +71,8 @@
 /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
 	unsigned long		cpu_mondo_block_pa;
 	unsigned long		cpu_list_pa;
-	unsigned long		__pad1[2];
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
 
 /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
 	unsigned long		__pad2[4];
@@ -116,6 +117,8 @@
 #define TRAP_PER_CPU_FAULT_INFO		0x40
 #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
 
 #define TRAP_BLOCK_SZ_SHIFT		8
 
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 230ba67..2d4f2ea 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,18 +90,39 @@
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 extern void tsb_init(struct tsb *tsb, unsigned long size);
 
-typedef struct {
-	spinlock_t		lock;
-	unsigned long		sparc64_ctx_val;
+struct tsb_config {
 	struct tsb		*tsb;
 	unsigned long		tsb_rss_limit;
 	unsigned long		tsb_nentries;
 	unsigned long		tsb_reg_val;
 	unsigned long		tsb_map_vaddr;
 	unsigned long		tsb_map_pte;
-	struct hv_tsb_descr	tsb_descr;
+};
+
+#define MM_TSB_BASE	0
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define MM_TSB_HUGE	1
+#define MM_NUM_TSBS	2
+#else
+#define MM_NUM_TSBS	1
+#endif
+
+typedef struct {
+	spinlock_t		lock;
+	unsigned long		sparc64_ctx_val;
+	unsigned long		huge_pte_count;
+	struct tsb_config	tsb_block[MM_NUM_TSBS];
+	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
 
+#define TSB_CONFIG_TSB		0x00
+#define TSB_CONFIG_RSS_LIMIT	0x08
+#define TSB_CONFIG_NENTRIES	0x10
+#define TSB_CONFIG_REG_VAL	0x18
+#define TSB_CONFIG_MAP_VADDR	0x20
+#define TSB_CONFIG_MAP_PTE	0x28
+
 #endif /* __MMU_H */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index e797432..2337eb4 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -29,20 +29,25 @@
 extern void destroy_context(struct mm_struct *mm);
 
 extern void __tsb_context_switch(unsigned long pgd_pa,
-				 unsigned long tsb_reg,
-				 unsigned long tsb_vaddr,
-				 unsigned long tsb_pte,
+				 struct tsb_config *tsb_base,
+				 struct tsb_config *tsb_huge,
 				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
-	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
-			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte,
-			     __pa(&mm->context.tsb_descr));
+	__tsb_context_switch(__pa(mm->pgd),
+			     &mm->context.tsb_block[0],
+#ifdef CONFIG_HUGETLB_PAGE
+			     (mm->context.tsb_block[1].tsb ?
+			      &mm->context.tsb_block[1] :
+			      NULL)
+#else
+			     NULL
+#endif
+			     , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
+extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index fcb2812..66fe4ac 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -30,6 +30,23 @@
 
 #ifdef __KERNEL__
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT		22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT		19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT		16
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern void _clear_page(void *page);
@@ -90,23 +107,6 @@
 
 #endif /* (STRICT_MM_TYPECHECKS) */
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define HPAGE_SHIFT		22
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HPAGE_SHIFT		19
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HPAGE_SHIFT		16
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
 				 (_AC(0x0000000070000000,UL)) : \
 				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
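
The HPAGE_* block moves above the STRICT_MM_TYPECHECKS section so that it
sits outside #ifndef __ASSEMBLY__, which is what lets tsb.S include
<asm/page.h> and use HPAGE_SHIFT directly.  The derived constants are
plain shifts; for CONFIG_HUGETLB_PAGE_SIZE_4MB on the default 8K base
page:

	HPAGE_SIZE         == 1UL << 22		/* 4MB */
	HUGETLB_PAGE_ORDER == 22 - 13 == 9	/* 512 base pages per huge page */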
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index ed4124e..c44e746 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -105,6 +105,7 @@
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
 #define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
 #define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
 #define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
@@ -150,6 +151,7 @@
 #define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
 #define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
+#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */
 
 #if PAGE_SHIFT == 13
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U