MIPS: Optimise non-EVA kernel user memory accesses

Commits ac1d8590d3ae (MIPS: asm: uaccess: Use EVA instruction
wrappers), 05c6516005c4 (MIPS: asm: uaccess: Add EVA support to
copy_{in,to,from}_user) & e3a9b07a9caf (MIPS: asm: uaccess: Add EVA
support for str*_user operations) added checks to various user memory
access functions & macros to determine whether to perform standard
memory accesses or their EVA userspace equivalents. In kernels built
without EVA support, these checks are entirely redundant. Avoid emitting
them, & allow the compiler to optimise out the EVA userspace code in
such kernels, by checking config_enabled(CONFIG_EVA).
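
Purely for illustration (not part of this patch), below is a minimal
userspace sketch of the pattern. CONFIG_EVA_ENABLED, the puts() paths
& main() are illustrative stand-ins, not the kernel's real helpers;
the point is that once the predicate reduces to a compile-time
constant, the compiler may discard the untaken branch entirely, which
is how the EVA userspace code drops out of non-EVA kernels.

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for config_enabled(CONFIG_EVA): 0 in a non-EVA build. */
  #ifndef CONFIG_EVA
  #define CONFIG_EVA_ENABLED 0
  #else
  #define CONFIG_EVA_ENABLED 1
  #endif

  static bool eva_kernel_access(void)
  {
  	/*
  	 * Constant false when !CONFIG_EVA_ENABLED, so the segment
  	 * comparison & the kernel-access branch in the caller become
  	 * dead code the compiler is free to eliminate.
  	 */
  	if (!CONFIG_EVA_ENABLED)
  		return false;

  	return true; /* stands in for segment_eq(get_fs(), get_ds()) */
  }

  int main(void)
  {
  	if (eva_kernel_access())
  		puts("kernel access path");
  	else
  		puts("EVA userspace access path");
  	return 0;
  }

Building this sketch with & without -DCONFIG_EVA and comparing the
disassembly shows the first branch removed in the non-EVA case, which
is the source of the size saving below.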

This reduces the size of a malta_defconfig kernel built using GCC 4.9.2
by 32KiB (32768 bytes: from 5995072 to 5962304 bytes).

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/10165/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bf8b324..6ed061d 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -78,6 +78,21 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
+/*
+ * eva_kernel_access() - determine whether kernel memory access on an EVA system
+ *
+ * Determines whether memory accesses should be performed to kernel memory
+ * on a system using Extended Virtual Addressing (EVA).
+ *
+ * Return: true if a kernel memory access on an EVA system, else false.
+ */
+static inline bool eva_kernel_access(void)
+{
+	if (!config_enabled(CONFIG_EVA))
+		return false;
+
+	return segment_eq(get_fs(), get_ds());
+}
 
 /*
  * Is an address valid? This does a straightforward calculation rather
@@ -281,7 +296,7 @@
 ({									\
 	int __gu_err;							\
 									\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__get_kernel_common((x), size, ptr);			\
 	} else {							\
 		__chk_user_ptr(ptr);					\
@@ -297,7 +312,7 @@
 									\
 	might_fault();							\
 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
-		if (segment_eq(get_fs(), get_ds()))			\
+		if (eva_kernel_access())				\
 			__get_kernel_common((x), size, __gu_ptr);	\
 		else							\
 			__get_user_common((x), size, __gu_ptr);		\
@@ -422,7 +437,7 @@
 	int __pu_err = 0;						\
 									\
 	__pu_val = (x);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__put_kernel_common(ptr, size);				\
 	} else {							\
 		__chk_user_ptr(ptr);					\
@@ -439,7 +454,7 @@
 									\
 	might_fault();							\
 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
-		if (segment_eq(get_fs(), get_ds()))			\
+		if (eva_kernel_access())				\
 			__put_kernel_common(__pu_addr, size);		\
 		else							\
 			__put_user_common(__pu_addr, size);		\
@@ -833,7 +848,7 @@
 	__cu_from = (from);						\
 	__cu_len = (n);							\
 	might_fault();							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
 	else								\
@@ -853,7 +868,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
 	else								\
@@ -871,7 +886,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
 							      __cu_from,\
 							      __cu_len);\
@@ -904,7 +919,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
 						   __cu_from,		\
 						   __cu_len);		\
@@ -1126,7 +1141,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
 						     __cu_from,		\
 						     __cu_len);		\
@@ -1150,7 +1165,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
 						    __cu_len);		\
 	} else {							\
@@ -1170,7 +1185,7 @@
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
 						    __cu_len);		\
 	} else {							\
@@ -1250,7 +1265,7 @@
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1299,7 +1314,7 @@
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1343,7 +1358,7 @@
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			__MODULE_JAL(__strlen_kernel_asm)
@@ -1370,7 +1385,7 @@
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1411,7 +1426,7 @@
 	long res;
 
 	might_fault();
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"