Changed magic number from 0xE5 to I_VMMPC_POSTED
[akaros.git] / kern / arch / x86 / uaccess.h
1 /* Copyright (c) 2015 Google Inc
2  * Davide Libenzi <dlibenzi@google.com>
3  * See LICENSE for details.
4  *
5  * Part of this code coming from a Linux kernel file:
6  *
7  * linux/arch/x86/include/asm/uaccess.h
8  *
9  * Which, even though missing specific copyright, it is supposed to be
10  * ruled by the overall Linux copyright.
11  */
12
13 #pragma once
14
15 #include <ros/errno.h>
16 #include <compiler.h>
17 #include <stdint.h>
18 #include <umem.h>
19
/*
 * NOTE(review): presumably placeholders for the SMAP stac/clac
 * instructions that would open/close the user-access window; empty here,
 * so SMAP enforcement appears to be disabled in this tree -- confirm.
 */
#define ASM_STAC
#define ASM_CLAC
/* Dereference wrapper so "m" constraints name the pointed-to object. */
#define __m(x) *(x)
23
/*
 * One exception-table entry: the address of an instruction that may
 * fault, and the fixup code to resume at if it does.  Both fields are
 * stored as self-relative offsets (_ASM_EXTABLE emits ".quad addr - .");
 * ex_insn_addr()/ex_fixup_addr() below recover the absolute addresses.
 */
struct extable_ip_fixup {
	uint64_t insn;
	uint64_t fixup;
};
28
/*
 * Emit an (empty) __ex_table section with its 16-byte alignment.  This
 * guarantees the section exists and is aligned even in a translation
 * unit that never records a real fixup entry.
 */
#define _ASM_EXTABLE_INIT()						\
	asm volatile(							\
	" .pushsection \"__ex_table\",\"a\"\n"				\
	" .balign 16\n"							\
	" .popsection\n"						\
	: :)
35
/*
 * Append an exception-table entry mapping asm label 'from' (the
 * potentially faulting instruction) to label 'to' (its fixup).  Each
 * entry is two self-relative 64-bit offsets (".quad addr - ."), the
 * layout struct extable_ip_fixup describes.
 */
#define _ASM_EXTABLE(from, to)						\
	" .pushsection \"__ex_table\",\"a\"\n"				\
	" .balign 16\n"							\
	" .quad (" #from ") - .\n"					\
	" .quad (" #to ") - .\n"					\
	" .popsection\n"
42
/*
 * Store value 'x' to user address 'addr' with a single mov of size
 * 'itype' (b/w/l/q); 'rtype' is the matching register-width prefix and
 * 'ltype' the constraint used for 'x'.  If the store at label 1 faults,
 * the exception table sends execution to label 3 in .fixup, which loads
 * 'errret' into 'err' and jumps back to label 2; on success 'err' keeps
 * its incoming value (fed back in through the "0" matching constraint).
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
54
/*
 * Load from user address 'addr' into 'x' with a single mov of size
 * 'itype' (b/w/l/q); 'rtype' is the register-width prefix and 'ltype'
 * the (output) constraint for 'x'.  If the load at label 1 faults, the
 * fixup at label 3 sets 'err' to 'errret' AND zeroes the destination
 * register (the xor) so the caller never observes stale data, then
 * resumes at label 2.  On success 'err' keeps its incoming value.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile(ASM_STAC "\n"					\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
67
/*
 * Copy 'count' bytes from 'src' to 'dst' with "rep movsb".  If the copy
 * faults, 'err' is set to 'errret' and the transfer is abandoned;
 * otherwise 'err' keeps its incoming value.
 *
 * "rep movsb" mutates RDI/RSI/RCX and stores through a pointer the
 * compiler cannot see in the operand list, so the registers are passed
 * as read-write ("+") operands on local temporaries and a "memory"
 * clobber is declared.  (The previous pure-input constraints let the
 * compiler assume those registers and the destination bytes were
 * unchanged across the asm.)  Operand %4 is still 'errret', so the
 * .fixup stub is unchanged.
 */
#define __user_memcpy(dst, src, count, err, errret)			\
do {									\
	void *__um_dst = (dst);						\
	const void *__um_src = (src);					\
	unsigned long __um_cnt = (count);				\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1:	rep movsb\n"				\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %4,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), "+D" (__um_dst), "+S" (__um_src),	\
		       "+c" (__um_cnt)					\
		     : "i" (errret), "0" (err)				\
		     : "memory");					\
} while (0)
79
80 static inline int copy_to_user(void *dst, const void *src, unsigned int count)
81 {
82         int err = 0;
83
84         if (unlikely(!is_user_rwaddr(dst, count))) {
85                 err = -EFAULT;
86         } else if (!__builtin_constant_p(count)) {
87                 __user_memcpy(dst, src, count, err, -EFAULT);
88         } else {
89                 switch (count) {
90                 case 1:
91                         __put_user_asm(*(const uint8_t *) src, (uint8_t *) dst, err, "b",
92                                                    "b", "iq", -EFAULT);
93                         break;
94                 case 2:
95                         __put_user_asm(*(const uint16_t *) src, (uint16_t *) dst, err, "w",
96                                                    "w", "ir", -EFAULT);
97                         break;
98                 case 4:
99                         __put_user_asm(*(const uint32_t *) src, (uint32_t *) dst, err, "l",
100                                                    "k", "ir", -EFAULT);
101                         break;
102                 case 8:
103                         __put_user_asm(*(const uint64_t *) src, (uint64_t *) dst, err, "q",
104                                                    "", "er", -EFAULT);
105                         break;
106                 default:
107                         __user_memcpy(dst, src, count, err, -EFAULT);
108                 }
109         }
110
111         return err;
112 }
113
114 static inline int copy_from_user(void *dst, const void *src,
115                                                                  unsigned int count)
116 {
117         int err = 0;
118
119         if (unlikely(!is_user_raddr((void *) src, count))) {
120                 err = -EFAULT;
121         } else if (!__builtin_constant_p(count)) {
122                 __user_memcpy(dst, src, count, err, -EFAULT);
123         } else {
124                 switch (count) {
125                 case 1:
126                         __get_user_asm(*(uint8_t *) dst, (const uint8_t *) src, err, "b",
127                                                    "b", "=q", -EFAULT);
128                         break;
129                 case 2:
130                         __get_user_asm(*(uint16_t *) dst, (const uint16_t *) src, err, "w",
131                                                    "w", "=r", -EFAULT);
132                         break;
133                 case 4:
134                         __get_user_asm(*(uint32_t *) dst, (const uint32_t *) src, err, "l",
135                                                    "k", "=r", -EFAULT);
136                         break;
137                 case 8:
138                         __get_user_asm(*(uint64_t *) dst, (const uint64_t *) src, err, "q",
139                                                    "", "=r", -EFAULT);
140                         break;
141                 default:
142                         __user_memcpy(dst, src, count, err, -EFAULT);
143                 }
144         }
145
146         return err;
147 }
148
149 static inline uintptr_t ex_insn_addr(const struct extable_ip_fixup *x)
150 {
151         return (uintptr_t) &x->insn + x->insn;
152 }
153
154 static inline uintptr_t ex_fixup_addr(const struct extable_ip_fixup *x)
155 {
156         return (uintptr_t) &x->fixup + x->fixup;
157 }