Merge branch 'x86/asm' into x86/mm
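
Pull in the pending x86/asm changes that the x86/mm work builds on:

 - efi_stub_32.S / efi_stub_64.S / entry_64.S: annotate assembly
   symbols with ENDPROC() and GLOBAL() instead of open-coded
   ".globl name; name:" sequences. ENDPROC() additionally marks
   the symbol as a function (.type name, @function) and closes it
   with an END() size annotation.

 - uaccess_64.h: use the non-temporal (cache-bypassing) copy
   routine only for user copies of at least PAGE_SIZE; smaller
   copies take the regular cached path.

 - irq_vectors.h / head_64.S: replace the magic numbers 3, 15 and
   256 with the named constants FIRST_VM86_IRQ, LAST_VM86_IRQ and
   IDT_ENTRIES.

For reference, GLOBAL() is expected to expand to the same two
directives it replaces, roughly:

    #define GLOBAL(name)  \
            .globl name;  \
    name:

The uaccess_64.h change, in words: large write()s get chunked into
page-sized copies whose data is unlikely to be read back through the
cache soon, so they use non-temporal stores; smaller copies keep
allocating cache lines. A minimal userspace sketch of that dispatch
(nt_copy(), cached_copy() and SKETCH_PAGE_SIZE are illustrative
stand-ins, not kernel interfaces):

    #include <stdio.h>
    #include <string.h>

    #define SKETCH_PAGE_SIZE 4096  /* stand-in for PAGE_SIZE */

    /* stand-in for __copy_user_nocache(): non-temporal stores */
    static size_t nt_copy(void *dst, const void *src, size_t size)
    {
            memcpy(dst, src, size); /* real code uses movnti */
            return 0;               /* 0 bytes left uncopied */
    }

    /* stand-in for __copy_from_user(): ordinary cached copy */
    static size_t cached_copy(void *dst, const void *src, size_t size)
    {
            memcpy(dst, src, size);
            return 0;
    }

    static size_t copy_nocache(void *dst, const void *src, size_t size)
    {
            /* page-sized or larger copies bypass the cache */
            if (size >= SKETCH_PAGE_SIZE)
                    return nt_copy(dst, src, size);
            return cached_copy(dst, src, size);
    }

    int main(void)
    {
            static char src[2 * SKETCH_PAGE_SIZE];
            static char dst[2 * SKETCH_PAGE_SIZE];

            copy_nocache(dst, src, sizeof(src)); /* non-temporal path */
            copy_nocache(dst, src, 64);          /* cached path */
            printf("both copy paths exercised\n");
            return 0;
    }

The sketch only mirrors the size test; the real cutoff lives in
__copy_from_user_nocache() in the uaccess_64.h hunk below.
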
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index b07278c..8a285f3 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -128,7 +128,7 @@
 #ifndef __ASSEMBLY__
 static inline int invalid_vm86_irq(int irq)
 {
-	return irq < 3 || irq > 15;
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
 }
 #endif
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c4..987a2c1 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -192,14 +192,30 @@
 					   unsigned size)
 {
 	might_sleep();
-	return __copy_user_nocache(dst, src, size, 1);
+	/*
+	 * In practice this limit means that large file write()s
+	 * which get chunked to 4K copies get handled via
+	 * non-temporal stores here. Smaller writes get handled
+	 * via regular __copy_from_user():
+	 */
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 1);
+	else
+		return __copy_from_user(dst, src, size);
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
 						    const void __user *src,
 						    unsigned size)
 {
-	return __copy_user_nocache(dst, src, size, 0);
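+	/*
+	 * Same PAGE_SIZE cutoff as above, minus the might_sleep():
+	 * only page-sized copies bypass the cache.
+	 */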
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 0);
+	else
+		return __copy_from_user_inatomic(dst, src, size);
 }
 
 unsigned long
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
index 8b9171b..fbe66e6 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/kernel/efi_stub_32.S
@@ -113,6 +113,7 @@
 	movl	(%edx), %ecx
 	pushl	%ecx
 	ret
+ENDPROC(efi_call_phys)
 .previous
 
 .data
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
index 99b47d4..4c07cca 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -41,6 +41,7 @@
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call0)
 
 ENTRY(efi_call1)
 	SAVE_XMM
@@ -50,6 +51,7 @@
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call1)
 
 ENTRY(efi_call2)
 	SAVE_XMM
@@ -59,6 +61,7 @@
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call2)
 
 ENTRY(efi_call3)
 	SAVE_XMM
@@ -69,6 +72,7 @@
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call3)
 
 ENTRY(efi_call4)
 	SAVE_XMM
@@ -80,6 +84,7 @@
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call4)
 
 ENTRY(efi_call5)
 	SAVE_XMM
@@ -92,6 +97,7 @@
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call5)
 
 ENTRY(efi_call6)
 	SAVE_XMM
@@ -107,3 +113,4 @@
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call6)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index dcf3128..83d1836 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -77,20 +77,17 @@
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
 
@@ -110,8 +107,7 @@
 	jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 
 trace:
@@ -148,9 +144,7 @@
 	retq
 END(ftrace_graph_caller)
 
-
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
 	subq  $80, %rsp
 
 	movq %rax, (%rsp)
@@ -188,6 +182,7 @@
 ENTRY(native_usergs_sysret64)
 	swapgs
 	sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 
@@ -633,16 +628,14 @@
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
-	.globl int_ret_from_sys_call
-	.globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
-int_with_check:
+GLOBAL(int_with_check)
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl TI_flags(%rcx),%edx
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 2e648e3..54b29bb 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -329,8 +329,6 @@
 #endif /* CONFIG_EARLY_PRINTK */
 	.previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
 	.balign	PAGE_SIZE; \
 ENTRY(name)
@@ -419,7 +417,7 @@
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
-	.skip 256 * 16
+	.skip IDT_ENTRIES * 16
 
 	.section .bss.page_aligned, "aw", @nobits
 	.align PAGE_SIZE