Added safe user memory access APIs
author    Davide Libenzi <dlibenzi@google.com>
          Thu, 15 Oct 2015 22:26:00 +0000 (15:26 -0700)
committer Barret Rhoden <brho@cs.berkeley.edu>
          Fri, 30 Oct 2015 20:02:29 +0000 (16:02 -0400)
Added safe user memory access APIs, which allow kernel code to
copy data to and from user memory at zero cost on the fast path.
The exception table facility can also be used in other cases where
we execute a potentially faulting instruction.
The code comes from the Linux kernel version 3.11.10, most of it
from the arch/x86/include/asm/uaccess.h include file.
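
A minimal usage sketch (the syscall and its argument are hypothetical,
not part of this commit): a kernel path hands a value to user space and
checks for -EFAULT, which the exception table delivers instead of a
kernel fault:

        #include <arch/uaccess.h>

        /* Hypothetical syscall: report a 64-bit counter to user space. */
        static int sys_read_counter(uint64_t *ubuf)
        {
                uint64_t kval = 42;     /* kernel-side value */

                /* Returns 0 on success, -EFAULT on a bad user pointer. */
                return copy_to_user(ubuf, &kval, sizeof(kval));
        }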

Signed-off-by: Davide Libenzi <dlibenzi@google.com>
[Touched up checkpatch complaint and compiler.h]
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/arch/x86/uaccess.h [new file with mode: 0644]
kern/include/ex_table.h [new file with mode: 0644]
kern/src/ex_table.c [new file with mode: 0644]

diff --git a/kern/arch/x86/uaccess.h b/kern/arch/x86/uaccess.h
new file mode 100644 (file)
index 0000000..90ffea8
--- /dev/null
@@ -0,0 +1,165 @@
+/* Copyright (c) 2015 Google Inc
+ * Davide Libenzi <dlibenzi@google.com>
+ * See LICENSE for details.
+ *
+ * Part of this code comes from a Linux kernel file:
+ *
+ * linux/arch/x86/include/asm/uaccess.h
+ *
+ * which, even though it carries no specific copyright notice, is covered
+ * by the overall Linux copyright.
+ */
+
+#pragma once
+
+#include <ros/errno.h>
+#include <compiler.h>
+#include <stdint.h>
+#include <umem.h>
+
+/* Placeholders for the SMAP stac/clac user-access markers; no-ops for now. */
+#define ASM_STAC
+#define ASM_CLAC
+#define __m(x) *(x)
+
+/* Exception table entry. Each field stores its target address as an offset
+ * relative to the field's own address (see _ASM_EXTABLE), which keeps the
+ * table position independent. */
+struct extable_ip_fixup {
+       uint64_t insn;
+       uint64_t fixup;
+};
+
+#define _ASM_EXTABLE_INIT()                                                \
+       asm volatile(                                                       \
+       " .pushsection \"__ex_table\",\"a\"\n"                              \
+       " .balign 16\n"                                                     \
+       " .popsection\n"                                                    \
+       : :)
+
+#define _ASM_EXTABLE(from, to)                                             \
+       " .pushsection \"__ex_table\",\"a\"\n"                              \
+       " .balign 16\n"                                                     \
+       " .quad (" #from ") - .\n"                                          \
+       " .quad (" #to ") - .\n"                                            \
+       " .popsection\n"
+
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)         \
+       asm volatile(ASM_STAC "\n"                                         \
+                    "1:        mov"itype" %"rtype"1,%2\n"                 \
+                    "2: " ASM_CLAC "\n"                                   \
+                    ".section .fixup,\"ax\"\n"                            \
+                    "3:        mov %3,%0\n"                               \
+                    "  jmp 2b\n"                                          \
+                    ".previous\n"                                         \
+                    _ASM_EXTABLE(1b, 3b)                                  \
+                    : "=r" (err)                                          \
+                    : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)         \
+       asm volatile(ASM_STAC "\n"                                         \
+                    "1:        mov"itype" %2,%"rtype"1\n"                 \
+                    "2: " ASM_CLAC "\n"                                   \
+                    ".section .fixup,\"ax\"\n"                            \
+                    "3:        mov %3,%0\n"                               \
+                    "  xor"itype" %"rtype"1,%"rtype"1\n"                  \
+                    "  jmp 2b\n"                                          \
+                    ".previous\n"                                         \
+                    _ASM_EXTABLE(1b, 3b)                                  \
+                    : "=r" (err), ltype(x)                                \
+                    : "m" (__m(addr)), "i" (errret), "0" (err))
+
+/* "rep movsb" rewrites RDI/RSI/RCX and writes the destination buffer, so
+ * those registers are in/out operands and memory is clobbered. */
+#define __user_memcpy(dst, src, count, err, errret)                       \
+       asm volatile(ASM_STAC "\n"                                         \
+                    "1:        rep movsb\n"                               \
+                    "2: " ASM_CLAC "\n"                                   \
+                    ".section .fixup,\"ax\"\n"                            \
+                    "3:        mov %4,%0\n"                               \
+                    "  jmp 2b\n"                                          \
+                    ".previous\n"                                         \
+                    _ASM_EXTABLE(1b, 3b)                                  \
+                    : "=r" (err), "+D" (dst), "+S" (src), "+c" (count)    \
+                    : "i" (errret), "0" (err)                             \
+                    : "memory")
+
+static inline int copy_to_user(void *dst, const void *src, unsigned int count)
+{
+       int err = 0;
+
+       if (unlikely(!is_user_rwaddr(dst, count))) {
+               err = -EFAULT;
+       } else if (!__builtin_constant_p(count)) {
+               __user_memcpy(dst, src, count, err, -EFAULT);
+       } else {
+               switch (count) {
+               case 1:
+                       __put_user_asm(*(const uint8_t *) src, (uint8_t *) dst, err, "b",
+                                                  "b", "iq", -EFAULT);
+                       break;
+               case 2:
+                       __put_user_asm(*(const uint16_t *) src, (uint16_t *) dst, err, "w",
+                                                  "w", "ir", -EFAULT);
+                       break;
+               case 4:
+                       __put_user_asm(*(const uint32_t *) src, (uint32_t *) dst, err, "l",
+                                                  "k", "ir", -EFAULT);
+                       break;
+               case 8:
+                       __put_user_asm(*(const uint64_t *) src, (uint64_t *) dst, err, "q",
+                                                  "", "er", -EFAULT);
+                       break;
+               default:
+                       __user_memcpy(dst, src, count, err, -EFAULT);
+               }
+       }
+
+       return err;
+}
+
+static inline int copy_from_user(void *dst, const void *src,
+                                                                unsigned int count)
+{
+       int err = 0;
+
+       if (unlikely(!is_user_raddr((void *) src, count))) {
+               err = -EFAULT;
+       } else if (!__builtin_constant_p(count)) {
+               __user_memcpy(dst, src, count, err, -EFAULT);
+       } else {
+               switch (count) {
+               case 1:
+                       __get_user_asm(*(uint8_t *) dst, (const uint8_t *) src, err, "b",
+                                                  "b", "=q", -EFAULT);
+                       break;
+               case 2:
+                       __get_user_asm(*(uint16_t *) dst, (const uint16_t *) src, err, "w",
+                                                  "w", "=r", -EFAULT);
+                       break;
+               case 4:
+                       __get_user_asm(*(uint32_t *) dst, (const uint32_t *) src, err, "l",
+                                                  "k", "=r", -EFAULT);
+                       break;
+               case 8:
+                       __get_user_asm(*(uint64_t *) dst, (const uint64_t *) src, err, "q",
+                                                  "", "=r", -EFAULT);
+                       break;
+               default:
+                       __user_memcpy(dst, src, count, err, -EFAULT);
+               }
+       }
+
+       return err;
+}
+
+/* Decode a self-relative table entry field into an absolute address. */
+static inline uintptr_t ex_insn_addr(const struct extable_ip_fixup *x)
+{
+       return (uintptr_t) &x->insn + x->insn;
+}
+
+static inline uintptr_t ex_fixup_addr(const struct extable_ip_fixup *x)
+{
+       return (uintptr_t) &x->fixup + x->fixup;
+}
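
The commit message notes the exception table can guard any potentially
faulting kernel instruction, not only user copies. A sketch of what that
could look like for an MSR read, assuming the trap handler also consults
get_fixup_ip() for general protection faults (safe_rdmsr() is
hypothetical and not part of this patch):

        static inline int safe_rdmsr(uint32_t msr, uint64_t *val)
        {
                uint32_t lo, hi;
                int err = 0;

                /* If rdmsr faults, control resumes at 3:, which loads
                 * the error code and jumps back past the instruction. */
                asm volatile("1: rdmsr\n"
                             "2:\n"
                             ".section .fixup,\"ax\"\n"
                             "3: mov %4,%0\n"
                             "   jmp 2b\n"
                             ".previous\n"
                             _ASM_EXTABLE(1b, 3b)
                             : "=r" (err), "=a" (lo), "=d" (hi)
                             : "c" (msr), "i" (-EFAULT), "0" (err));
                if (!err)
                        *val = ((uint64_t) hi << 32) | lo;
                return err;
        }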
diff --git a/kern/include/ex_table.h b/kern/include/ex_table.h
new file mode 100644 (file)
index 0000000..8ef2885
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef ROS_INC_EX_TABLE_H
+#define ROS_INC_EX_TABLE_H
+
+#include <stdint.h>
+
+void exception_table_init(void);
+uintptr_t get_fixup_ip(uintptr_t xip);
+
+#endif /* ROS_INC_EX_TABLE_H */
diff --git a/kern/src/ex_table.c b/kern/src/ex_table.c
new file mode 100644 (file)
index 0000000..b8a5d2b
--- /dev/null
@@ -0,0 +1,77 @@
+/* Copyright (c) 2015 Google Inc
+ * Davide Libenzi <dlibenzi@google.com>
+ * See LICENSE for details.
+ *
+ * Part of this code comes from a Linux kernel file:
+ *
+ * linux/arch/x86/mm/extable.c
+ *
+ * which, even though it carries no specific copyright notice, is covered
+ * by the overall Linux copyright.
+ */
+
+#include <ex_table.h>
+#include <arch/uaccess.h>
+#include <sort.h>
+
+extern struct extable_ip_fixup __start___ex_table;
+extern struct extable_ip_fixup __stop___ex_table;
+
+static int fixup_cmp(const void *f1, const void *f2)
+{
+       return ((const struct extable_ip_fixup *) f1)->insn <
+               ((const struct extable_ip_fixup *) f2)->insn ? -1 : 1;
+}
+
+void exception_table_init(void)
+{
+       struct extable_ip_fixup *first = &__start___ex_table;
+       struct extable_ip_fixup *last = &__stop___ex_table;
+       uint64_t offset = 0;
+
+       /* Entries store IP-relative offsets from their own field address, so
+        * sorting would corrupt them as entries move.  Rebase them relative to
+        * the table start, sort, then restore the per-field base. */
+       for (struct extable_ip_fixup *fx = first; fx < last; fx++) {
+               fx->insn += offset;
+               offset += sizeof(fx->insn);
+               fx->fixup += offset;
+               offset += sizeof(fx->fixup);
+       }
+
+       sort(first, last - first, sizeof(*first), fixup_cmp);
+
+       offset = 0;
+       for (struct extable_ip_fixup *fx = first; fx < last; fx++) {
+               fx->insn -= offset;
+               offset += sizeof(fx->insn);
+               fx->fixup -= offset;
+               offset += sizeof(fx->fixup);
+       }
+
+       /* Emit an empty __ex_table section so that __start___ex_table and
+        * __stop___ex_table stay defined even when no code uses the facility.
+        */
+       _ASM_EXTABLE_INIT();
+}
+
+uintptr_t get_fixup_ip(uintptr_t xip)
+{
+       const struct extable_ip_fixup *first = &__start___ex_table;
+       /* __stop___ex_table points one past the last entry */
+       const struct extable_ip_fixup *last = &__stop___ex_table - 1;
+
+       while (first <= last) {
+               const struct extable_ip_fixup *x = first + ((last - first) >> 1);
+               uintptr_t insn = ex_insn_addr(x);
+
+               if (insn < xip)
+                       first = x + 1;
+               else if (insn > xip)
+                       last = x - 1;
+               else
+                       return (uintptr_t) ex_fixup_addr(x);
+       }
+
+       return 0;
+}
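
For these fixups to fire, the architecture fault path must consult the
table before treating a kernel-mode fault as fatal. That wiring is not
part of this patch; the idea, sketched against the x86 trapframe (the
field names are assumptions):

        /* In the kernel page fault path, before panicking on a fault
         * taken in kernel mode: */
        uintptr_t fixup = get_fixup_ip(hw_tf->tf_rip);

        if (fixup) {
                /* Resume at the .fixup stub, which sets the error code
                 * and jumps past the faulting instruction. */
                hw_tf->tf_rip = fixup;
                return;
        }
        /* Otherwise it is a genuine kernel fault: panic as usual. */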