/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

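/*
 * Illustrative sketch (not part of this header): the usual temporary
 * widening of the address limit so that a kernel buffer can be handed to
 * a routine expecting a __user pointer.  The caller must always restore
 * the previous limit afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...call the user-access helpers on the kernel buffer...
 *	set_fs(old_fs);
 */
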
#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n" /* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n" /* jump if addr+size>limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n" /* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

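/*
 * Illustrative sketch only: access_ok() evaluates to non-zero when the
 * whole [addr, addr + size) range lies below the current addr_limit.
 * The 'type' argument (VERIFY_READ/VERIFY_WRITE) is ignored on this
 * architecture.  A hypothetical caller might use it like this:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	...proceed with __put_user()/__copy_to_user() on ubuf...
 */
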
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern int fixup_exception(struct pt_regs *regs);

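/*
 * Illustrative sketch of how an entry reaches the table (this is the
 * pattern the inline assembly below emits): the faulting instruction at
 * label 1 is paired with out-of-line fixup code at label 3, which loads
 * -EFAULT and resumes at label 2.  The page fault handler looks the
 * faulting PC up via fixup_exception() and, on a match, rewrites the
 * saved PC to the fixup address instead of killing the task.
 *
 *	"1:	mov	(%2),%1\n"		faulting access
 *	"	.section __ex_table,\"a\"\n"
 *	"	.balign	4\n"
 *	"	.long	1b, 3b\n"		insn/fixup pair
 *	"	.previous"
 */
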
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

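/*
 * Illustrative sketch only (the buffer names are made up):
 * get_user()/put_user() return 0 on success and -EFAULT on a bad
 * address, so a checked access followed by repeated unchecked accesses
 * might look like:
 *
 *	int val;
 *	if (get_user(val, &uptr->field))	single checked access
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_READ, uarray, n * sizeof(*uarray)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], &uarray[i]))	range checked above
 *			return -EFAULT;
 */
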
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	const __typeof__(*(ptr))* __guc_ptr = (ptr);		\
	int _e;							\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
		_e = __get_user_nocheck((x), __guc_ptr, (size)); \
	else {							\
		_e = -EFAULT;					\
		(x) = (__typeof__(x))0;				\
	}							\
	_e;							\
})

#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n\t"					\
		"	mov		0,%1\n"			\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	if (likely(__access_ok(__pu_addr, size))) {		\
		switch (size) {					\
		case 1:  __put_user_asm("bu"); break;		\
		case 2:  __put_user_asm("hu"); break;		\
		case 4:  __put_user_asm(""  ); break;		\
		case 8:  __put_user_asm8();    break;		\
		default: __pu_err = __put_user_unknown(); break; \
		}						\
	}							\
	else {							\
		__pu_err = -EFAULT;				\
	}							\
	__pu_err;						\
})

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})

#define __put_user_asm8()					\
({								\
	asm volatile(						\
		"1:	mov	%1,%3		\n"		\
		"2:	mov	%2,%4		\n"		\
		"	mov	0,%0		\n"		\
		"3:				\n"		\
		"	.section .fixup,\"ax\"	\n"		\
		"4:				\n"		\
		"	mov	%5,%0		\n"		\
		"	jmp	3b		\n"		\
		"	.previous		\n"		\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign	4		\n"		\
		"	.long	1b, 4b		\n"		\
		"	.long	2b, 4b		\n"		\
		"	.previous		\n"		\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]), \
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),	\
		  "i" (-EFAULT)					\
		);						\
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)				\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:	movbu	(%0),%3;\n"		\
			"1:	movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"			\
			"	.long	0b,3b\n"		\
			"	.long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)

#define __copy_user_zeroing(to, from, size)			\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:	movbu	(%0),%3;\n"		\
			"1:	movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:\n"					\
			"	mov	%2,%0\n"		\
			"	clr	%3\n"			\
			"4:	movbu	%3,(%1);\n"		\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	4b;\n"			\
			"	mov	%0,%2\n"		\
			"	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"			\
			"	.long	0b,3b\n"		\
			"	.long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)
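
/*
 * Note (descriptive only): on a faulting access, __copy_user() simply
 * stops, leaving 'size' holding the number of bytes not copied, while
 * __copy_user_zeroing() additionally zero-fills the remainder of the
 * destination buffer before returning the residual count - the
 * traditional copy_from_user() behaviour.
 */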

/* We inline the __ versions of copy_from/to_user because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov	%0,a0;\n"	\
		"0:	movbu	(%1),d3;\n"	\
		"1:	movbu	d3,(%2);\n"	\
		"	add	-1,a0;\n"	\
		"	bne	0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp	2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign	4\n"		\
		"	.long	0b,3b\n"	\
		"	.long	1b,3b\n"	\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov	%0,a0;\n"		\
		"0:	movbu	(%1),d3;\n"		\
		"1:	movbu	d3,(%2);\n"		\
		"	add	-1,a0;\n"		\
		"	bne	0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign	4\n"			\
		"	.long	0b,3b\n"		\
		"	.long	1b,3b\n"		\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))

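/*
 * Illustrative sketch only (struct and buffer names are made up):
 * copy_to_user() and copy_from_user() return the number of bytes that
 * could NOT be copied, so zero means complete success:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */
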
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_UACCESS_H */