#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/* ColdFire doesn't have the moves instruction; fall back to plain move. */
#ifdef CONFIG_COLDFIRE
#define MOVES	"move"
#else
#define MOVES	"moves"
#endif

/* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
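
/*
 * For example, in __put_user_asm() below the user access at local label
 * 1: is allowed to fault; its table entry directs the CPU to the fixup
 * code at label 10:, which loads the error code (-EFAULT) into the
 * result register and resumes execution.
 */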

extern int __put_user_bad(void);
extern int __get_user_bad(void);
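
/*
 * These are referenced from the default: branches of the size switches
 * below, i.e. when an unsupported access size is requested; the usual
 * convention (an assumption here) is that no real definition exists, so
 * such a mistake shows up as a link-time error.  The __put_user_asm()
 * and __get_user_asm() helpers take the operand size suffix (b/w/l), the
 * register constraint to use for the value (d or r) and the error code
 * to return if the access faults.
 */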

#define __put_user_asm(res, x, ptr, bwl, reg, err) \
	"1:	"MOVES"."#bwl"	%2,%1\n" \
	"	.section .fixup,\"ax\"\n" \
	"10:	moveq.l	%3,%0\n" \
	"	.section __ex_table,\"a\"\n" \
	: "+d" (res), "=m" (*(ptr)) \
	: #reg (x), "i" (err))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr) \
	typeof(*(ptr)) __pu_val = (x); \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
		const void __user *__pu_ptr = (ptr); \
		"1:	"MOVES".l	%2,(%1)+\n" \
		"2:	"MOVES".l	%R2,(%1)\n" \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "+d" (__pu_err), "+a" (__pu_ptr) \
		: "r" (__pu_val), "i" (-EFAULT) \
		__pu_err = __put_user_bad(); \

#define put_user(x, ptr)	__put_user(x, ptr)
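
/*
 * Illustrative use: with "int __user *uaddr", put_user(val, uaddr) picks
 * the 4-byte (.l) case above and evaluates to 0 on success or -EFAULT if
 * the store into user space faults.
 */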

#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
	"1:	"MOVES"."#bwl"	%2,%1\n" \
	"	.section .fixup,\"ax\"\n" \
	"10:	move.l	%3,%0\n" \
	"	sub."#bwl"	%1,%1\n" \
	"	.section __ex_table,\"a\"\n" \
	: "+d" (res), "=&" #reg (__gu_val) \
	: "m" (*(ptr)), "i" (err)); \
	(x) = (typeof(*(ptr)))(unsigned long)__gu_val; \

#define __get_user(x, ptr) \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
/*	case 8:	disabled because gcc-4.1 has a broken typeof \
		const void *__gu_ptr = (ptr); \
		"1:	"MOVES".l	(%2)+,%1\n" \
		"2:	"MOVES".l	(%2),%R1\n" \
		"	.section .fixup,\"ax\"\n" \
		"10:	move.l	%3,%0\n" \
		"	.section __ex_table,\"a\"\n" \
		: "+d" (__gu_err), "=&r" (__gu_val), \
		(x) = (typeof(*(ptr)))__gu_val; \
		__gu_err = __get_user_bad(); \

#define get_user(x, ptr)	__get_user(x, ptr)
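
/*
 * Illustrative use: "int val; if (get_user(val, uaddr)) return -EFAULT;"
 * reads a value of the pointer's width from user space; if the access
 * faults, the fixup code above clears the destination register and the
 * macro evaluates to -EFAULT.
 */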

unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
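
/*
 * Out-of-line copy routines, used by the copy macros further down when
 * the length is not a compile-time constant or is too large to inline.
 * Like the inlined variants, they return the number of bytes that could
 * not be copied (0 on complete success).  The __constant_copy_*_asm()
 * helpers below copy a fixed byte count with at most three moves of
 * sizes s1/s2/s3; a 7-byte copy, for instance, becomes one long, one
 * word and one byte move (the "7, l, w, b" case in the switches below).
 */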

#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
	"1:	"MOVES"."#s1"	(%2)+,%3\n" \
	"	move."#s1"	%3,(%1)+\n" \
	"2:	"MOVES"."#s2"	(%2)+,%3\n" \
	"	move."#s2"	%3,(%1)+\n" \
	"	.ifnc	\""#s3"\",\"\"\n" \
	"3:	"MOVES"."#s3"	(%2)+,%3\n" \
	"	move."#s3"	%3,(%1)+\n" \
	"	.section __ex_table,\"a\"\n" \
	"	.ifnc	\""#s3"\",\"\"\n" \
	"	.section .fixup,\"ax\"\n" \
	"10:	clr."#s1"	(%1)+\n" \
	"20:	clr."#s2"	(%1)+\n" \
	"	.ifnc	\""#s3"\",\"\"\n" \
	"30:	clr."#s3"	(%1)+\n" \
	"	moveq.l	#"#n",%0\n" \
	: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;
		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);

	/* we limit the inlined version to 3 moves */
	return __generic_copy_from_user(to, from, n);

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
	"	move."#s1"	(%2)+,%3\n" \
	"11:	"MOVES"."#s1"	%3,(%1)+\n" \
	"12:	move."#s2"	(%2)+,%3\n" \
	"21:	"MOVES"."#s2"	%3,(%1)+\n" \
	"	.ifnc	\""#s3"\",\"\"\n" \
	"	move."#s3"	(%2)+,%3\n" \
	"31:	"MOVES"."#s3"	%3,(%1)+\n" \
	"	.section __ex_table,\"a\"\n" \
	"	.ifnc	\""#s3"\",\"\"\n" \
	"	.section .fixup,\"ax\"\n" \
	"5:	moveq.l	#"#n",%0\n" \
	: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);

	/* limit the inlined version to 3 moves */
	return __generic_copy_to_user(to, from, n);

#define __copy_from_user(to, from, n)		\
	(__builtin_constant_p(n) ?		\
	 __constant_copy_from_user(to, from, n) : \
	 __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n)		\
	(__builtin_constant_p(n) ?		\
	 __constant_copy_to_user(to, from, n) : \
	 __generic_copy_to_user(to, from, n))

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
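
/*
 * The copy macros above resolve at compile time: a constant length goes
 * through the inlined __constant_copy_*() paths, anything else falls
 * back to the out-of-line __generic_copy_*() routines.
 */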

long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user
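
/*
 * strlen_user() is just strnlen_user() with a fixed 32767-byte upper
 * bound on how far the string may be scanned.
 */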
#define strlen_user(str) strnlen_user(str, 32767)

#endif /* __M68K_UACCESS_H */