#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/*
 * These handle exceptions (faults) on both the source and the
 * destination; callers must do access_ok() themselves.
 */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

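/*
 * The three variants above are implemented in assembly (in this tree,
 * arch/x86/lib/copy_user_64.S); copy_user_generic() below picks one at
 * boot via the alternatives mechanism, keyed on CPU feature flags.
 */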
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If the CPU has the ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if the CPU has the rep_good feature, use
	 * copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string,
			   X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string,
			   X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
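/*
 * Note on the operands above: "=a" returns ret in %rax, while
 * "=D"/"=S"/"=d" with the matching "1"/"2"/"3" inputs pin to, from and
 * len to the %rdi/%rsi/%rdx argument registers; the clobber list covers
 * the remaining registers the assembly variants may scratch, so the
 * patched-in call behaves like an ordinary function call.
 */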

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

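/*
 * For compile-time-constant sizes, the switch below expands to one or
 * two inline mov instructions with exception fixups, avoiding the
 * out-of-line copy_user_generic() call; cases 10 and 16 split the copy
 * into an 8-byte move plus a 2- or 8-byte tail.
 */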
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		return ret;
	case 2:
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		return ret;
	case 4:
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		return ret;
	case 8:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

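/*
 * The checked wrappers below add might_fault(), which in debug builds
 * warns if a (potentially sleeping) user access is attempted from a
 * context that may not sleep.
 */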
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}
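/*
 * Illustrative caller (hypothetical names), assuming the usual pattern
 * of validating the range before using a __-prefixed variant:
 *
 *	struct foo kbuf;
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (__copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */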

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		return ret;
	case 2:
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 4:
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		return ret;
	case 8:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
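/*
 * The empty asm("" : : : "memory") statements above are compiler-only
 * barriers: they keep the compiler from reordering the second
 * __put_user_asm() store across the fault check on the first one.
 */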

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
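/*
 * For constant sizes up to 8 bytes, __copy_in_user() bounces the data
 * through a kernel register: a __get_user_asm() load from src followed,
 * only on success, by a __put_user_asm() store to dst.
 */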

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck((__force void *)dst, src, size);
}
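/*
 * The _inatomic variants omit might_fault(), making them usable from
 * atomic context by callers that have disabled page faults
 * (pagefault_disable()); a fault then fails fast with a nonzero return
 * instead of sleeping.
 */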

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

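/*
 * The nocache variants use non-temporal stores so that large copies do
 * not pollute the CPU caches; zerorest tells the implementation whether
 * to zero the remaining destination bytes after a partial fault.
 */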
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
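/*
 * copy_user_handle_tail() is the out-of-line fixup helper: when an
 * assembly copy routine faults mid-copy, it retries the remaining bytes
 * one at a time and, if zerorest is set, zeroes whatever could not be
 * copied, returning the number of uncopied bytes.
 */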

#endif /* _ASM_X86_UACCESS_64_H */