perf: Use a user_data blob for perf_event (XCC)
diff --git a/kern/arch/x86/uaccess.h b/kern/arch/x86/uaccess.h
index 80551e4..b3d6f27 100644
@@ -27,16 +27,16 @@ struct extable_ip_fixup {
 
 #define __read_msr_asm(eax, edx, addr, err, errret)                                            \
        asm volatile(ASM_STAC "\n"                                                                                      \
-                                "1:            rdmsr\n"                                                                        \
-                                "                      mfence\n"                                                                       \
-                                "2: " ASM_CLAC "\n"                                                                    \
-                                ".section .fixup,\"ax\"\n"                                                             \
-                                "3:            mov %4,%0\n"                                                            \
-                                "      jmp 2b\n"                                                                                       \
-                                ".previous\n"                                                                                  \
-                                _ASM_EXTABLE(1b, 3b)                                                                   \
-                                : "=r" (err), "=d" (edx), "=a" (eax)                                   \
-                                : "c" (addr), "i" (errret), "0" (err))
+                    "1:                rdmsr\n"                                                                        \
+                    "                  mfence\n"                                                                       \
+                    "2: " ASM_CLAC "\n"                                                                        \
+                    ".section .fixup,\"ax\"\n"                                                         \
+                    "3:                mov %4,%0\n"                                                            \
+                    "  jmp 2b\n"                                                                                       \
+                    ".previous\n"                                                                                      \
+                    _ASM_EXTABLE(1b, 3b)                                                                       \
+                    : "=r" (err), "=d" (edx), "=a" (eax)                                       \
+                    : "c" (addr), "i" (errret), "0" (err))
 
 #define __write_msr_asm(val, addr, err, errret)                                                        \
        asm volatile(ASM_STAC "\n"                                                                                      \
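(For reference, a hedged sketch of how the _ASM_EXTABLE entries above are consumed: if the rdmsr/wrmsr at label 1 faults, the trap path looks up the faulting RIP in the exception table and resumes at label 3, which loads errret into err and jumps back to label 2. Field and symbol names below are assumptions for illustration, not the exact Akaros definitions.)

#include <stdint.h>

/* Illustrative only: one entry is emitted per _ASM_EXTABLE(1b, 3b) above. */
struct extable_ip_fixup_example {
	uint64_t insn;		/* address of the instruction that may fault (label 1) */
	uint64_t fixup;		/* address to resume at after a fault (label 3) */
};

/* Assumed linker-provided bounds of the exception-table section. */
extern struct extable_ip_fixup_example __start___ex_table[];
extern struct extable_ip_fixup_example __stop___ex_table[];

/* Called from the fault path: if the faulting RIP has a fixup, return the RIP
 * to resume at (label 3, which stores errret into err), else 0. */
static uint64_t find_fixup_ip(uint64_t fault_rip)
{
	for (struct extable_ip_fixup_example *fx = __start___ex_table;
	     fx < __stop___ex_table; fx++) {
		if (fx->insn == fault_rip)
			return fx->fixup;
	}
	return 0;
}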
@@ -98,19 +98,19 @@ static inline int __put_user(void *dst, const void *src, unsigned int count)
        switch (count) {
        case 1:
                __put_user_asm(*(const uint8_t *) src, (uint8_t *) dst, err, "b",
-                                          "b", "iq", -EFAULT);
+                              "b", "iq", -EFAULT);
                break;
        case 2:
                __put_user_asm(*(const uint16_t *) src, (uint16_t *) dst, err, "w",
-                                          "w", "ir", -EFAULT);
+                              "w", "ir", -EFAULT);
                break;
        case 4:
                __put_user_asm(*(const uint32_t *) src, (uint32_t *) dst, err, "l",
-                                          "k", "ir", -EFAULT);
+                              "k", "ir", -EFAULT);
                break;
        case 8:
                __put_user_asm(*(const uint64_t *) src, (uint64_t *) dst, err, "q",
-                                          "", "er", -EFAULT);
+                              "", "er", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
@@ -141,19 +141,19 @@ static inline int __get_user(void *dst, const void *src, unsigned int count)
        switch (count) {
        case 1:
                __get_user_asm(*(uint8_t *) dst, (const uint8_t *) src, err, "b",
-                                          "b", "=q", -EFAULT);
+                              "b", "=q", -EFAULT);
                break;
        case 2:
                __get_user_asm(*(uint16_t *) dst, (const uint16_t *) src, err, "w",
-                                          "w", "=r", -EFAULT);
+                              "w", "=r", -EFAULT);
                break;
        case 4:
                __get_user_asm(*(uint32_t *) dst, (const uint32_t *) src, err, "l",
-                                          "k", "=r", -EFAULT);
+                              "k", "=r", -EFAULT);
                break;
        case 8:
                __get_user_asm(*(uint64_t *) dst, (const uint64_t *) src, err, "q",
-                                          "", "=r", -EFAULT);
+                              "", "=r", -EFAULT);
                break;
        default:
                __user_memcpy(dst, src, count, err, -EFAULT);
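(A hedged usage sketch for the accessors above: __get_user/__put_user copy a fixed-size scalar from or to a user address and return 0 or -EFAULT, with sizes other than 1/2/4/8 falling back to __user_memcpy. The function below is illustrative, not existing Akaros code.)

#include <stdint.h>
#include <errno.h>

/* Illustrative round trip: read a u64 from a user pointer, bump it, and write
 * it back.  'uptr' is assumed to be a user address the caller already vetted. */
static int bump_user_counter(uint64_t *uptr)
{
	uint64_t val;
	int err;

	err = __get_user(&val, uptr, sizeof(val));	/* kernel dst, user src */
	if (err)
		return err;	/* -EFAULT if the user load faulted */
	val++;
	return __put_user(uptr, &val, sizeof(val));	/* user dst, kernel src */
}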
@@ -163,7 +163,7 @@ static inline int __get_user(void *dst, const void *src, unsigned int count)
 }
 
 static inline int copy_from_user(void *dst, const void *src,
-                                                                unsigned int count)
+                                 unsigned int count)
 {
        int err = 0;
 
@@ -178,7 +178,7 @@ static inline int copy_from_user(void *dst, const void *src,
        return err;
 }
 
-static inline int safe_read_msr(uint32_t addr, uint64_t *value)
+static inline int read_msr_safe(uint32_t addr, uint64_t *value)
 {
        int err = 0;
        uint32_t edx, eax;
@@ -190,7 +190,7 @@ static inline int safe_read_msr(uint32_t addr, uint64_t *value)
        return err;
 }
 
-static inline int safe_write_msr(uint32_t addr, uint64_t value)
+static inline int write_msr_safe(uint32_t addr, uint64_t value)
 {
        int err = 0;
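(With the rename to read_msr_safe()/write_msr_safe(), callers can probe MSRs that may not exist without risking an unhandled #GP. A hedged example follows; MSR_EXAMPLE is a placeholder, not an Akaros-defined constant.)

/* Illustrative only: 0x611 is just an example MSR number. */
#define MSR_EXAMPLE 0x611

static void probe_msr_example(void)
{
	uint64_t val;

	/* Returns nonzero (via the .fixup path) if the rdmsr raises #GP,
	 * e.g. because the MSR does not exist on this CPU. */
	if (read_msr_safe(MSR_EXAMPLE, &val)) {
		printk("MSR %#x not readable on this CPU\n", MSR_EXAMPLE);
		return;
	}

	/* The write-side helper works the same way: a #GP (bad MSR, read-only
	 * MSR, reserved bits) becomes an error return instead of a fault. */
	if (write_msr_safe(MSR_EXAMPLE, val))
		printk("MSR %#x not writable on this CPU\n", MSR_EXAMPLE);
}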