[SPARC64]: More TLB/TSB handling fixes.

The SUN4V convention with non-shared TSBs is that the context
bit of the TAG is clear.  So we have to choose an "invalid"
bit and initialize new TSBs appropriately.  Otherwise a zero
TAG looks "valid".

Make sure, for the window fixup cases, that we use the right
global registers, that we don't trample on the live global
registers used by etrap/rtrap handling (%g2 and %g6), and that
we put the missing virtual address properly in %g5.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index 2ef6f6e..09a6a15 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -2,10 +2,10 @@
 	ldxa	[%g0] ASI_DMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
 	ldxa	[%g0] ASI_DMMU, %g6		! Get TAG TARGET
 	srlx	%g6, 48, %g5			! Get context
+	sllx	%g6, 22, %g6			! Zero out context
 	brz,pn	%g5, kvmap_dtlb			! Context 0 processing
-	 nop					! Delay slot (fill me)
+	 srlx	%g6, 22, %g6			! Delay slot
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
-	nop					! Push branch to next I$ line
 	cmp	%g4, %g6			! Compare TAG
 
 /* DTLB ** ICACHE line 2: TSB compare and TLB load	*/
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 730caa4..6dfe396 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -2,25 +2,25 @@
 	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
 	ldxa	[%g0] ASI_IMMU, %g6		! Get TAG TARGET
 	srlx	%g6, 48, %g5			! Get context
+	sllx	%g6, 22, %g6			! Zero out context
 	brz,pn	%g5, kvmap_itlb			! Context 0 processing
-	 nop					! Delay slot (fill me)
+	 srlx	%g6, 22, %g6			! Delay slot
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
-	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 
 /* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
+	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 	ldx	[%g4 + %lo(PAGE_EXEC)], %g4
 	bne,pn	%xcc, tsb_miss_itlb		! Miss
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g5, %g4, %g0			! Executable?
 	be,pn	%xcc, tsb_do_fault
 	 nop					! Delay slot, fill me
-	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
-	retry					! Trap done
+	nop
 
 /* ITLB ** ICACHE line 3: 				*/
-	nop
-	nop
+	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
+	retry					! Trap done
 	nop
 	nop
 	nop
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 47dfd45..ac29da9 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -52,8 +52,10 @@
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_itlb_longpath
-	 KTSB_STORE(%g1, %g0)
+	 KTSB_STORE(%g1, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -146,8 +148,10 @@
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, kvmap_dtlb_longpath
-	 KTSB_STORE(%g1, %g0)
+	 KTSB_STORE(%g1, %g7)
 
 	KTSB_WRITE(%g1, %g5, %g6)
 
@@ -215,8 +219,8 @@
 	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	nop
-	nop
+	SET_GL(1)
+	ldxa		[%g0] ASI_SCRATCHPAD, %g5
 	.previous
 
 	rdpr	%tl, %g3
@@ -226,7 +230,7 @@
 	ldxa	[%g4] ASI_DMMU, %g5
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	mov	%g4, %g5
+	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
 	nop
 	.previous
 
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 244d50d..57ccdae 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -16,15 +16,14 @@
 	ldx	[BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
 	ldx	[BASE + HV_FAULT_D_CTX_OFFSET], CTX;
 
-	/* DEST = (CTX << 48) | (VADDR >> 22)
+	/* DEST = (VADDR >> 22)
 	 *
 	 * Branch to ZERO_CTX_LABEL is context is zero.
 	 */
-#define	COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \
-	srlx	VADDR, 22, TMP; \
-	sllx	CTX, 48, DEST; \
+#define	COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
+	srlx	VADDR, 22, DEST; \
 	brz,pn	CTX, ZERO_CTX_LABEL; \
-	 or	DEST, TMP, DEST;
+	 nop;
 
 	/* Create TSB pointer.  This is something like:
 	 *
@@ -53,7 +52,7 @@
 	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
-	COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v)
+	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
 	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
@@ -72,15 +71,15 @@
 	 *
 	 * %g3:	PTE
 	 * %g4:	vaddr
-	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
 	 */
 sun4v_itlb_load:
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
 	mov	%o0, %g1		! save %o0
 	mov	%o1, %g2		! save %o1
 	mov	%o2, %g5		! save %o2
 	mov	%o3, %g7		! save %o3
 	mov	%g4, %o0		! vaddr
-	srlx	%g6, 48, %o1		! ctx
+	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1	! ctx
 	mov	%g3, %o2		! PTE
 	mov	HV_MMU_IMMU, %o3	! flags
 	ta	HV_MMU_MAP_ADDR_TRAP
@@ -101,7 +100,7 @@
 	ldxa	[%g1] ASI_SCRATCHPAD, %g1
 
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
-	COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v)
+	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
 	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
@@ -115,15 +114,15 @@
 	 *
 	 * %g3:	PTE
 	 * %g4:	vaddr
-	 * %g6:	TAG TARGET (only "CTX << 48" part matters)
 	 */
 sun4v_dtlb_load:
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
 	mov	%o0, %g1		! save %o0
 	mov	%o1, %g2		! save %o1
 	mov	%o2, %g5		! save %o2
 	mov	%o3, %g7		! save %o3
 	mov	%g4, %o0		! vaddr
-	srlx	%g6, 48, %o1		! ctx
+	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1	! ctx
 	mov	%g3, %o2		! PTE
 	mov	HV_MMU_DMMU, %o3	! flags
 	ta	HV_MMU_MAP_ADDR_TRAP
@@ -136,16 +135,18 @@
 	retry
 
 sun4v_dtlb_prot:
+	SET_GL(1)
+
 	/* Load MMU Miss base into %g2.  */
-	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldxa	[%g0] ASI_SCRATCHPAD, %g5
 	
-	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
+	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
 	rdpr	%tl, %g1
 	cmp	%g1, 1
-	bgu,pn		%xcc, winfix_trampoline
+	bgu,pn	%xcc, winfix_trampoline
 	 nop
-	ba,pt		%xcc, sparc64_realfault_common
-	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+	ba,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
 
 	/* Called from trap table with TAG TARGET placed into
 	 * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and
@@ -189,7 +190,8 @@
 	sethi	%hi(sun4v_err_itlb_vaddr), %g1
 	stx	%g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
 	sethi	%hi(sun4v_err_itlb_ctx), %g1
-	srlx	%g6, 48, %o1		! ctx
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
+	ldx	[%g6 + HV_FAULT_I_CTX_OFFSET], %o1
 	stx	%o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
 	sethi	%hi(sun4v_err_itlb_pte), %g1
 	stx	%g3, [%g1 + %lo(sun4v_err_itlb_pte)]
@@ -214,7 +216,8 @@
 	sethi	%hi(sun4v_err_dtlb_vaddr), %g1
 	stx	%g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
 	sethi	%hi(sun4v_err_dtlb_ctx), %g1
-	srlx	%g6, 48, %o1		! ctx
+	ldxa	[%g0] ASI_SCRATCHPAD, %g6
+	ldx	[%g6 + HV_FAULT_D_CTX_OFFSET], %o1
 	stx	%o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
 	sethi	%hi(sun4v_err_dtlb_pte), %g1
 	stx	%g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index a17259c..cc225c0 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -36,7 +36,7 @@
 	/* At this point we have:
 	 * %g4 --	missing virtual address
 	 * %g1 --	TSB entry address
-	 * %g6 --	TAG TARGET ((vaddr >> 22) | (ctx << 48))
+	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
 	TRAP_LOAD_PGD_PHYS(%g7, %g5)
@@ -50,8 +50,10 @@
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g0)
+	 TSB_STORE(%g1, %g7)
 
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
@@ -62,8 +64,10 @@
 	sethi		%hi(_PAGE_SZBITS), %g7
 	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
 	cmp		%g2, %g7
+	mov		1, %g7
+	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
-	 TSB_STORE(%g1, %g0)
+	 TSB_STORE(%g1, %g7)
 
 	TSB_WRITE(%g1, %g5, %g6)
 
@@ -136,7 +140,7 @@
 	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	SET_GL(1)
-	ldxa		[%g0] ASI_SCRATCHPAD, %g2
+	ldxa		[%g0] ASI_SCRATCHPAD, %g4
 	.previous
 
 	bne,pn		%xcc, tsb_do_itlb_fault
@@ -150,7 +154,7 @@
 	ldxa	[%g4] ASI_DMMU, %g5
 	.section .sun4v_2insn_patch, "ax"
 	.word	661b
-	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
+	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
 	nop
 	.previous
 
@@ -217,8 +221,9 @@
 	bne,pn	%icc, 1b
 	 membar	#LoadLoad
 	cmp	%g1, %o1
+	mov	1, %o3
 	bne,pt	%xcc, 2f
-	 clr	%o3
+	 sllx	%o3, TSB_TAG_INVALID_BIT, %o3
 	TSB_CAS_TAG(%o0, %g1, %o3)
 	cmp	%g1, %o3
 	bne,pn	%xcc, 1b
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index bd9e320..aa2aec6 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -296,7 +296,7 @@
 
 		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
 				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
+		tag = (address >> 22UL);
 		tsb_insert(tsb, tag, pte_val(pte));
 	}
 }
@@ -1110,6 +1110,8 @@
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+
 	if (tlb_type == hypervisor)
 		sun4v_pgprot_init();
 	else
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 3c1ff05..353cb06 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,9 +20,9 @@
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr)
 {
-	return (tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == (vaddr >> 22));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -38,8 +38,8 @@
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent->tag, v, 0)) {
-			ent->tag = 0UL;
+		if (tag_compare(ent->tag, v)) {
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 			membar_storeload_storestore();
 		}
 	}
@@ -50,14 +50,9 @@
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
 	unsigned long nentries = mm->context.tsb_nentries;
-	unsigned long ctx, base;
+	unsigned long base;
 	int i;
 
-	if (unlikely(!CTX_VALID(mm->context)))
-		return;
-
-	ctx = CTX_HWBITS(mm->context);
-
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(tsb);
 	else
@@ -71,7 +66,7 @@
 
 		hash = tsb_hash(v, nentries);
 		ent = base + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL) | (ctx << 48UL);
+		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
@@ -243,7 +238,8 @@
 				  "i" (ASI_NUCLEUS_QUAD_LDD));
 		}
 
-		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
+		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
+			   (1UL << TSB_TAG_INVALID_BIT)))
 			continue;
 
 		/* We only put base page size PTEs into the TSB,
@@ -315,10 +311,13 @@
 			break;
 	}
 
-	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
+	page = alloc_pages(gfp_flags, get_order(size));
 	if (unlikely(!page))
 		return;
 
+	/* Mark all tags as invalid.  */
+	memset(page_address(page), 0x40, size);
+
 	if (size == max_tsb_size)
 		mm->context.tsb_rss_limit = ~0UL;
 	else