[SPARC64]: Simplify TSB insert checks.

Don't try to avoid putting non-base-page-size entries
into the user TSB.  Checking for them actually costs
us more than it saves.
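
For reference, the insertion now boils down to the slot and tag
computation below, run unconditionally.  This is a standalone sketch
of that arithmetic only; PAGE_SHIFT, the power-of-two entry count,
and the >> 22 tag shift match the patch, while the struct layout and
constants here are illustrative, not the kernel's:

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel values used in the patch. */
	#define PAGE_SHIFT	13UL		/* sparc64 base page size: 8K */
	#define TSB_NENTRIES	512UL		/* must be a power of two */

	struct tsb {
		unsigned long tag;
		unsigned long pte;
	};

	static struct tsb tsb_table[TSB_NENTRIES];

	/* Mirrors the now-unconditional insert in update_mmu_cache():
	 * the slot comes from the virtual-address bits just above
	 * PAGE_SHIFT, the tag from the bits at and above bit 22.
	 */
	static void tsb_insert_sketch(unsigned long address, unsigned long pte)
	{
		struct tsb *ent = &tsb_table[(address >> PAGE_SHIFT) &
					     (TSB_NENTRIES - 1UL)];

		ent->tag = address >> 22UL;
		ent->pte = pte;
	}

	int main(void)
	{
		unsigned long addr = 0x123456000UL;
		struct tsb *ent;

		tsb_insert_sketch(addr, 0x80000000000UL);
		ent = &tsb_table[(addr >> PAGE_SHIFT) & (TSB_NENTRIES - 1UL)];
		printf("slot %td tag %#lx pte %#lx\n",
		       ent - tsb_table, ent->tag, ent->pte);
		return 0;
	}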

Eventually we'll have a multiple TSB scheme for user
processes.  Once a process starts using larger pages,
we'll allocate and use such a TSB.
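
Purely as an illustration of that direction (the context layout,
field names, and huge-page flag below are hypothetical, nothing in
the tree yet), the TSB selection could look roughly like:

	/* Hypothetical two-TSB context: a base-page TSB that always
	 * exists, plus a second TSB allocated lazily once the process
	 * maps larger pages.
	 */
	struct tsb;

	struct mm_tsb_context {
		struct tsb *base_tsb;	/* always allocated */
		struct tsb *huge_tsb;	/* NULL until first large mapping */
	};

	static struct tsb *pick_tsb(struct mm_tsb_context *ctx, int is_huge)
	{
		if (is_huge && ctx->huge_tsb)
			return ctx->huge_tsb;
		return ctx->base_tsb;
	}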

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 563852b..d738910 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,20 +55,6 @@
 	brgez,a,pn	%g5, tsb_do_fault
 	 TSB_STORE(%g1, %g7)
 
-	/* If it is larger than the base page size, don't
-	 * bother putting it into the TSB.
-	 */
-	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
-	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
-	and		%g5, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS), %g7
-	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
-	cmp		%g2, %g7
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	bne,a,pn	%xcc, tsb_tlb_reload
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 87d5d1a..5930e87 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -280,6 +280,8 @@
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
+	struct tsb *tsb;
+	unsigned long tag;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -308,15 +310,10 @@
 	}
 
 	mm = vma->vm_mm;
-	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
-		struct tsb *tsb;
-		unsigned long tag;
-
-		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL);
-		tsb_insert(tsb, tag, pte_val(pte));
-	}
+	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+			       (mm->context.tsb_nentries - 1UL)];
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, pte_val(pte));
 }
 
 void flush_dcache_page(struct page *page)