akaros/kern/arch/x86/uaccess.h
/* Copyright (c) 2015 Google Inc
 * Davide Libenzi <dlibenzi@google.com>
 * See LICENSE for details.
 *
 * Part of this code comes from a Linux kernel file:
 *
 * linux/arch/x86/include/asm/uaccess.h
 *
 * which, even though it carries no specific copyright notice, is covered by
 * the overall Linux copyright.
 */

#pragma once

#include <ros/errno.h>
#include <compiler.h>
#include <stdint.h>
#include <umem.h>
#include <arch/fixup.h>

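/* Dereference helper so a pointer can be passed to inline assembly as a "m"
 * (memory) constraint operand for the pointed-to object. */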
#define __m(x) *(x)

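/* Exception table entry.  Both fields are stored relative to their own
 * address: insn points at the instruction that may fault, fixup at the
 * recovery code to jump to (see ex_insn_addr()/ex_fixup_addr() below). */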
struct extable_ip_fixup {
        uint64_t insn;
        uint64_t fixup;
};

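/* RDMSR with fault recovery: reads MSR addr into edx:eax.  If the RDMSR
 * faults (e.g. on a non-existent MSR), the exception table fixup sets err to
 * errret and execution resumes after the instruction. */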
#define __read_msr_asm(eax, edx, addr, err, errret)                     \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:rdmsr\n"                                        \
                     "  mfence\n"                                       \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:mov %4,%0\n"                                    \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), "=d" (edx), "=a" (eax)               \
                     : "c" (addr), "i" (errret), "0" (err))

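/* WRMSR with fault recovery: writes the 64-bit val (split into edx:eax) to
 * MSR addr.  On a fault, err is set to errret. */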
#define __write_msr_asm(val, addr, err, errret)                         \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:wrmsr\n"                                        \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:mov %4,%0\n"                                    \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err)                                       \
                     : "d" ((uint32_t) (val >> 32)),                    \
                       "a" ((uint32_t) (val & 0xffffffff)), "c" (addr), \
                       "i" (errret), "0" (err))

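/* Single-instruction store to user memory with fault recovery.  itype is the
 * mov size suffix, rtype the register-name modifier, and ltype the constraint
 * for the source operand.  On a fault, err is set to errret. */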
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:mov"itype" %"rtype"1,%2\n"                      \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:mov %3,%0\n"                                    \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

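/* Single-instruction load from user memory with fault recovery.  On a fault,
 * err is set to errret and the destination is zeroed, so no stale data leaks
 * to the caller. */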
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:mov"itype" %2,%"rtype"1\n"                      \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:mov %3,%0\n"                                    \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

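/* "rep movsb" copy with fault recovery.  On a fault, err is set to errret and
 * the copy may be partial.  dst, src, and count are advanced in place by the
 * string instruction ("+D", "+S", "+c"). */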
#define __user_memcpy(dst, src, count, err, errret)                     \
        asm volatile(ASM_STAC "\n"                                      \
                     "  cld\n"                                          \
                     "1:rep movsb\n"                                    \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:mov %4,%0\n"                                    \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err), "+D" (dst), "+S" (src), "+c" (count)  \
                     : "i" (errret), "0" (err)                          \
                     : "memory")

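/* Store count bytes from src (kernel) to dst (user).  Sizes 1, 2, 4, and 8
 * use a single mov; anything else falls back to __user_memcpy.  Returns 0 or
 * -EFAULT.  Does not validate dst; the caller must have done so. */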
static inline int __put_user(void *dst, const void *src, unsigned int count)
{
        int err = 0;

        switch (count) {
        case 1:
                __put_user_asm(*(const uint8_t *) src, (uint8_t *) dst, err,
                               "b", "b", "iq", -EFAULT);
                break;
        case 2:
                __put_user_asm(*(const uint16_t *) src, (uint16_t *) dst, err,
                               "w", "w", "ir", -EFAULT);
                break;
        case 4:
                __put_user_asm(*(const uint32_t *) src, (uint32_t *) dst, err,
                               "l", "k", "ir", -EFAULT);
                break;
        case 8:
                __put_user_asm(*(const uint64_t *) src, (uint64_t *) dst, err,
                               "q", "", "er", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
        }

        return err;
}

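/* Copy count bytes from kernel memory (src) to user memory (dst).  Returns 0
 * on success and -EFAULT if dst is not a writable user address of that size
 * or if the copy faults.  Compile-time-constant counts are dispatched to
 * __put_user, so 1/2/4/8-byte copies become a single mov. */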
static inline int copy_to_user(void *dst, const void *src, unsigned int count)
{
        int err = 0;

        if (unlikely(!is_user_rwaddr(dst, count))) {
                err = -EFAULT;
        } else if (!__builtin_constant_p(count)) {
                __user_memcpy(dst, src, count, err, -EFAULT);
        } else {
                err = __put_user(dst, src, count);
        }

        return err;
}

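/* Load count bytes from src (user) to dst (kernel).  Sizes 1, 2, 4, and 8
 * use a single mov; anything else falls back to __user_memcpy.  Returns 0 or
 * -EFAULT.  Does not validate src; the caller must have done so. */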
static inline int __get_user(void *dst, const void *src, unsigned int count)
{
        int err = 0;

        switch (count) {
        case 1:
                __get_user_asm(*(uint8_t *) dst, (const uint8_t *) src, err,
                               "b", "b", "=q", -EFAULT);
                break;
        case 2:
                __get_user_asm(*(uint16_t *) dst, (const uint16_t *) src, err,
                               "w", "w", "=r", -EFAULT);
                break;
        case 4:
                __get_user_asm(*(uint32_t *) dst, (const uint32_t *) src, err,
                               "l", "k", "=r", -EFAULT);
                break;
        case 8:
                __get_user_asm(*(uint64_t *) dst, (const uint64_t *) src, err,
                               "q", "", "=r", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
        }

        return err;
}

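/* Copy count bytes from user memory (src) to kernel memory (dst).  Returns 0
 * on success and -EFAULT if src is not a readable user address of that size
 * or if the copy faults. */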
static inline int copy_from_user(void *dst, const void *src,
                                 unsigned int count)
{
        int err = 0;

        if (unlikely(!is_user_raddr((void *) src, count))) {
                err = -EFAULT;
        } else if (!__builtin_constant_p(count)) {
                __user_memcpy(dst, src, count, err, -EFAULT);
        } else {
                err = __get_user(dst, src, count);
        }

        return err;
}

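/* Read MSR addr into *value, returning -EFAULT instead of faulting on a bad
 * or inaccessible MSR.  *value is only written on success. */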
static inline int read_msr_safe(uint32_t addr, uint64_t *value)
{
        int err = 0;
        uint32_t edx, eax;

        __read_msr_asm(eax, edx, addr, err, -EFAULT);
        if (likely(err == 0))
                *value = ((uint64_t) edx << 32) | eax;

        return err;
}

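/* Write value to MSR addr, returning -EFAULT instead of faulting on a bad or
 * inaccessible MSR. */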
static inline int write_msr_safe(uint32_t addr, uint64_t value)
{
        int err = 0;

        __write_msr_asm(value, addr, err, -EFAULT);

        return err;
}

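/* Decode the self-relative exception table fields back into the absolute
 * addresses of the faulting instruction and its fixup code. */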
static inline uintptr_t ex_insn_addr(const struct extable_ip_fixup *x)
{
        return (uintptr_t) &x->insn + x->insn;
}

static inline uintptr_t ex_fixup_addr(const struct extable_ip_fixup *x)
{
        return (uintptr_t) &x->fixup + x->fixup;
}

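/* Usage sketch (illustrative only; 'utv' and 'ktv' are hypothetical names,
 * not part of this header):
 *
 *      struct timeval ktv = { .tv_sec = 1, .tv_usec = 0 };
 *
 *      if (copy_to_user(utv, &ktv, sizeof(ktv)))
 *              return -EFAULT;
 */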