[SPARC64]: Access TSB with physical addresses when possible.

This way we don't need to lock the TSB into the TLB.
The trick is that every TSB load/store is recorded in
a special instruction patch section.  The defaults use
virtual addresses; the replacement instructions in the
patch section use physical-address loads and stores.
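
For example, the store macro can take roughly this shape (a
sketch modeled on include/asm-sparc64/tsb.h; the section name,
the local label, and the ASI_N default are assumptions here,
while ASI_PHYS_USE_EC is the physical ASI this patch already
uses):

	#define TSB_STORE(ADDR, VAL)			\
	661:	stxa	VAL, [ADDR] ASI_N;		\
		.section .tsb_phys_patch, "ax";		\
		.word	661b;				\
		stxa	VAL, [ADDR] ASI_PHYS_USE_EC;	\
		.previous;

The .word records where the default instruction sits, and the
instruction following it is the same-size (4 byte) physical
replacement that gets copied over it.  TSB_LOAD_TAG,
TSB_CAS_TAG, and the quad-load macro get the same wrapping.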

We can't do this on all chips because only cheetah+ and
later have the physical-address variant of the atomic
quad load.
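
At boot, on chips that support it, the kernel can walk that
section and rewrite each recorded instruction in place.  A
minimal sketch of such a pass (the entry layout and the
__tsb_phys_patch section-boundary symbols are assumptions
modeled on the sparc64 tree):

	struct tsb_phys_patch_entry {
		unsigned int	addr;	/* insn to rewrite	    */
		unsigned int	insn;	/* physical-ASI replacement */
	};
	extern struct tsb_phys_patch_entry __tsb_phys_patch,
		__tsb_phys_patch_end;

	static void __init tsb_phys_patch(void)
	{
		struct tsb_phys_patch_entry *p = &__tsb_phys_patch;

		while (p < &__tsb_phys_patch_end) {
			unsigned long addr = p->addr;

			/* Copy in the physical-ASI instruction,
			 * then flush the I-cache for that word.
			 */
			*(unsigned int *) addr = p->insn;
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr));
			p++;
		}
	}

On pre-cheetah+ chips the pass is simply skipped, and the
virtual-address defaults keep running.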

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index e1dd37f..ff6a79b 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -53,7 +53,7 @@
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
 	brgez,a,pn	%g5, tsb_do_fault
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -64,7 +64,7 @@
 	and		%g2, %g4, %g2
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 stx		%g0, [%g1]
+	 TSB_STORE(%g1, %g0)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -131,13 +131,13 @@
 
 	/* Insert an entry into the TSB.
 	 *
-	 * %o0: TSB entry pointer
+	 * %o0: TSB entry pointer (virt or phys address)
 	 * %o1: tag
 	 * %o2:	pte
 	 */
 	.align	32
-	.globl	tsb_insert
-tsb_insert:
+	.globl	__tsb_insert
+__tsb_insert:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 	TSB_LOCK_TAG(%o0, %g2, %g3)
@@ -146,6 +146,31 @@
 	retl
 	 nop
 
+	/* Flush the given TSB entry if it has the matching
+	 * tag.
+	 *
+	 * %o0: TSB entry pointer (virt or phys address)
+	 * %o1:	tag
+	 */
+	.align	32
+	.globl	tsb_flush
+tsb_flush:
+	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
+1:	TSB_LOAD_TAG(%o0, %g1)
+	srlx	%g1, 32, %o3
+	andcc	%o3, %g2, %g0
+	bne,pn	%icc, 1b
+	 membar	#LoadLoad
+	cmp	%g1, %o1
+	bne,pt	%xcc, 2f
+	 clr	%o3
+	TSB_CAS_TAG(%o0, %g1, %o3)
+	cmp	%g1, %o3
+	bne,pn	%xcc, 1b
+	 nop
+2:	retl
+	 TSB_MEMBAR
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *