[XTENSA] Add support for cache-aliasing

Add support for processors that have cache-aliasing issues, such as
the Stretch S5000 processor. Cache aliasing means that the size of
one cache way is larger than the page size, so a page can end up in
several places in the cache depending on the virtual-to-physical
translation. The method used here is to map a user page temporarily
through auto-refill way 0 of the DTLB. We will probably want to
revisit this issue and use a better approach with kmap/kunmap.
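
As a rough illustration of the aliasing arithmetic, here is a minimal
user-space sketch; the 32 KiB way and 4 KiB page are assumed example
values (the real sizes are configuration-dependent):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)	/* assumed: 4 KiB */
	#define DCACHE_WAY_SIZE		0x8000UL		/* assumed: 32 KiB */
	#define DCACHE_ALIAS_MASK	(DCACHE_WAY_SIZE - PAGE_SIZE)

	/*
	 * The cache "color" of a virtual address: which way offset its
	 * page falls into. Two virtual mappings of the same physical
	 * page share cache lines only if their colors are equal.
	 */
	static unsigned long cache_color(unsigned long vaddr)
	{
		return (vaddr & DCACHE_ALIAS_MASK) >> PAGE_SHIFT;
	}

	int main(void)
	{
		/* the same physical page, mapped at two virtual addresses */
		printf("color %lu\n", cache_color(0x10001000UL));	/* 1 */
		printf("color %lu\n", cache_color(0x20007000UL));	/* 7 */
		return 0;
	}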

Signed-off-by: Chris Zankel <chris@zankel.net>
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 667a6c4..c0fcc1c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,5 +1,5 @@
 /*
- * linux/include/asm-xtensa/pgtable.h
+ * include/asm-xtensa/pgtable.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -60,16 +60,20 @@
 #define FIRST_USER_ADDRESS	0
 #define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
 
-/* virtual memory area. We keep a distance to other memory regions to be
+/*
+ * Virtual memory area. We keep a distance to other memory regions to be
  * on the safe side. We also use this area for cache aliasing.
  */
 
-// FIXME: virtual memory area must be configuration-dependent
-
 #define VMALLOC_START		0xC0000000
-#define VMALLOC_END		0xC7FF0000
+#define VMALLOC_END		0xC6FEFFFF
+#define TLBTEMP_BASE_1		0xC6FF0000
+#define TLBTEMP_BASE_2		0xC6FF8000
+#define MODULE_START		0xC7000000
+#define MODULE_END		0xC7FFFFFF
 
-/* Xtensa Linux config PTE layout (when present):
+/*
+ * Xtensa Linux config PTE layout (when present):
  *	31-12:	PPN
  *	11-6:	Software
  *	5-4:	RING
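
(A note on the hunk above: TLBTEMP_BASE_2 - TLBTEMP_BASE_1 is 0x8000,
one 32 KiB way, so a temporary kernel mapping can be placed at the same
cache color as the user page it shadows. A hedged sketch of that
address computation follows; the helper name is hypothetical, and the
actual work happens in arch code outside this header:)

	#define TLBTEMP_BASE_1		0xC6FF0000UL
	#define DCACHE_ALIAS_MASK	0x7000UL  /* assumed: 32 KiB way, 4 KiB pages */

	/*
	 * Pick a temporary kernel virtual address whose cache color
	 * matches the user virtual address, so accesses through the
	 * temporary DTLB mapping hit the very same cache lines.
	 */
	static unsigned long tlbtemp_addr(unsigned long user_vaddr)
	{
		return TLBTEMP_BASE_1 + (user_vaddr & DCACHE_ALIAS_MASK);
	}
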
@@ -126,12 +130,13 @@
 #define PAGE_SHARED	   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
 #define PAGE_SHARED_EXEC \
 	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
-#define PAGE_KERNEL	   __pgprot(_PAGE_PRESENT)
+#define PAGE_KERNEL	   __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_EXEC   __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
-# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_HW_WRITE)
+# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
 #else
-# define _PAGE_DIRECTORY (_PAGE_VALID|_PAGE_ACCESSED|_PAGE_HW_WRITE|_PAGE_CA_WB)
+# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
 #endif
 
 #else /* no mmu */
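
(On the hunk above: PAGE_KERNEL now carries _PAGE_HW_WRITE, while
_PAGE_DIRECTORY drops it in both branches; note that the aliasing
branch carries no _PAGE_CA_WB at all, i.e. page-table pages are not
mapped write-back cacheable, so the software TLB-refill handler cannot
read stale entries. The DCACHE_WAY_SIZE > PAGE_SIZE test is the
patch's central compile-time switch; a sketch of where such a value
comes from, with assumed example sizes:)

	#define PAGE_SIZE	0x1000UL	/* assumed: 4 KiB */
	#define DCACHE_SIZE	0x10000UL	/* assumed: 64 KiB */
	#define DCACHE_WAYS	2		/* assumed: 2-way set-associative */
	#define DCACHE_WAY_SIZE	(DCACHE_SIZE / DCACHE_WAYS)

	#if (DCACHE_WAY_SIZE > PAGE_SIZE)
	/* a 32 KiB way holds 8 page-sized colors: virtual aliases are
	   possible, and duplicate mappings must be flushed or
	   color-matched */
	#endif
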
@@ -244,6 +249,10 @@
 static inline void update_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
+#endif
+
 }
 
 struct mm_struct;
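
(About the dhwb in the hunk above: "data cache hit writeback" pushes
the cache line holding the just-written PTE out to memory. Since
_PAGE_DIRECTORY no longer maps page tables write-back cacheable in the
aliasing case, the TLB-refill path reads the table from memory, and a
PTE store sitting in a write-back cache would be invisible to it. The
same code restated with the reasoning as comments; this mirrors the
hunk and is not additional patch code:)

	static inline void update_pte(pte_t *ptep, pte_t pteval)
	{
		*ptep = pteval;	/* store goes via the cached kernel mapping */
	#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
		/*
		 * Write the dirty line back so a reader that bypasses
		 * the cache (the TLB-refill walk of the page table)
		 * sees the new entry instead of stale memory.
		 */
		__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
	#endif
	}
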
@@ -383,13 +392,12 @@
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
+
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
                 remap_pfn_range(vma, from, pfn, size, prot)
 
 
-/* No page table caches to init */
-
-#define pgtable_cache_init()	do { } while (0)
+extern void pgtable_cache_init(void);
 
 typedef pte_t *pte_addr_t;
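
(pgtable_cache_init() goes from a no-op macro to a real function: on
aliasing configurations the kernel now needs boot-time setup for
page-table allocation. The implementation lives elsewhere in
arch/xtensa and is not part of this hunk; as a hedged sketch of the
usual shape only, assuming a slab cache of PAGE_SIZE-aligned tables
with pre-invalidated entries, and using the current kmem_cache_create
signature rather than the one of that era:)

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <asm/pgtable.h>

	struct kmem_cache *pgtable_cache;

	/* constructor: hand out page tables with every entry invalid */
	static void pgd_ctor(void *addr)
	{
		pte_t *ptep = addr;
		int i;

		for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
			pte_clear(NULL, 0, ptep);
	}

	void __init pgtable_cache_init(void)
	{
		/* PAGE_SIZE alignment: a table never straddles a page,
		   so it always occupies exactly one cache color */
		pgtable_cache = kmem_cache_create("pgd", PAGE_SIZE,
						  PAGE_SIZE, 0, pgd_ctor);
	}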