Add RISC-V CAS via load-reserved/store conditional
author     Andrew Waterman <waterman@cs.berkeley.edu>
           Wed, 27 Mar 2013 09:59:49 +0000 (02:59 -0700)
committer  Andrew Waterman <waterman@cs.berkeley.edu>
           Wed, 27 Mar 2013 10:05:50 +0000 (03:05 -0700)
kern/arch/riscv/Makefrag
kern/arch/riscv/atomic.c [deleted file]
kern/arch/riscv/atomic.h
tools/compilers/gcc-glibc/binutils-2.21.1-riscv.patch
tools/compilers/gcc-glibc/gcc-4.6.1-riscv.patch
tools/compilers/gcc-glibc/glibc-2.14.1-riscv.patch
user/parlib/include/riscv/atomic.h
user/parlib/riscv/vcore.S
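
In short: the kernel and parlib stop emulating compare-and-swap behind spinlocks and lean on GCC's __sync builtins, while the toolchain patches below teach binutils and GCC to emit real load-reserved/store-conditional sequences. Purely as an illustration (a sketch, not code from this patch; the lr.w/sc.w operand order follows the opcode table added in the binutils patch), a word-sized CAS built from these instructions looks roughly like this:

    /* Hypothetical sketch only -- not code from this commit. */
    static inline int cas_w_sketch(volatile int *addr, int exp_val, int new_val)
    {
            int old, fail;
            __asm__ __volatile__ (
                    "1: lr.w  %0, 0(%2)\n"      /* reserve and load the current value */
                    "   bne   %0, %3, 2f\n"     /* mismatch: fall through and fail    */
                    "   sc.w  %1, %4, 0(%2)\n"  /* try the store; %1 != 0 on failure  */
                    "   bnez  %1, 1b\n"         /* reservation lost: retry            */
                    "2:"
                    : "=&r" (old), "=&r" (fail)
                    : "r" (addr), "r" (exp_val), "r" (new_val)
                    : "memory");
            return old == exp_val;
    }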

diff --git a/kern/arch/riscv/Makefrag b/kern/arch/riscv/Makefrag
index 7b8cc97..ce1f49f 100644 (file)
@@ -19,7 +19,6 @@ KERN_ARCH_SRCFILES := $(KERN_ARCH_SRC_DIR)/boot.S \
                       $(KERN_ARCH_SRC_DIR)/console.c \
                       $(KERN_ARCH_SRC_DIR)/pmap.c \
                       $(KERN_ARCH_SRC_DIR)/time.c \
-                      $(KERN_ARCH_SRC_DIR)/atomic.c \
                       $(KERN_ARCH_SRC_DIR)/smp.c \
                       $(KERN_ARCH_SRC_DIR)/colored_caches.c \
                       $(KERN_ARCH_SRC_DIR)/page_alloc.c \
diff --git a/kern/arch/riscv/atomic.c b/kern/arch/riscv/atomic.c
deleted file mode 100644 (file)
index d4b8ac6..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-#include <atomic.h>
-
-// This emulates compare and swap by hashing the address into one of
-// K buckets, acquiring the lock for that bucket, then performing the
-// operation during the critical section.  :-(
-bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
-{
-       if ((long)*addr != exp_val)
-               return 0;
-       
-  #define K 17
-       /* TODO: not sure if this initialization works. */
-       static spinlock_t cas_locks[K*ARCH_CL_SIZE/sizeof(spinlock_t)] =
-                         {SPINLOCK_INITIALIZER_IRQSAVE};
-
-  uintptr_t bucket = (uintptr_t)addr / sizeof(uintptr_t) % K;
-       spinlock_t* lock = &cas_locks[bucket*ARCH_CL_SIZE/sizeof(spinlock_t)];
-       
-       bool retval = 0;
-       spin_lock_irqsave(lock);
-       if ((long)*addr == exp_val) {
-               atomic_swap(addr, new_val);
-               retval = 1;
-       }
-       spin_unlock_irqsave(lock);
-       return retval;
-}
-
-bool atomic_cas_ptr(void **addr, void *exp_val, void *new_val)
-{
-       return atomic_cas((atomic_t*)addr, (long)exp_val, (long)new_val);
-}
-
-/* Ghetto, copied the regular CAS code... */
-bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val, uint32_t new_val)
-{
-       if (*addr != exp_val)
-               return 0;
-       
-  #define K 17
-       /* TODO: not sure if this initialization works. */
-       static spinlock_t cas_locks[K*ARCH_CL_SIZE/sizeof(spinlock_t)] =
-                         {SPINLOCK_INITIALIZER_IRQSAVE};
-
-  uintptr_t bucket = (uintptr_t)addr / sizeof(uintptr_t) % K;
-       spinlock_t* lock = &cas_locks[bucket*ARCH_CL_SIZE/sizeof(spinlock_t)];
-       
-       bool retval = 0;
-       spin_lock_irqsave(lock);
-       if (*addr == exp_val) {
-               atomic_swap_u32(addr, new_val);
-               retval = 1;
-       }
-       spin_unlock_irqsave(lock);
-       return retval;
-}
diff --git a/kern/arch/riscv/atomic.h b/kern/arch/riscv/atomic.h
index 0d35bc8..dc0ac2b 100644 (file)
@@ -4,9 +4,28 @@
 #include <ros/common.h>
 #include <arch/arch.h>
 
-bool atomic_cas(atomic_t *addr, long exp_val, long new_val);
-bool atomic_cas_ptr(void **addr, void *exp_val, void *new_val);
-bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val, uint32_t new_val);
+#ifdef __riscv64
+# define LR_P "lr.d"
+# define SC_P "sc.d"
+#else
+# define LR_P "lr.w"
+# define SC_P "sc.w"
+#endif
+
+static bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
+{
+  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
+}
+
+static bool atomic_cas_ptr(void** addr, void* exp_val, void* new_val)
+{
+  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
+}
+
+static bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val, uint32_t new_val)
+{
+  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
+}
 
 static inline void atomic_init(atomic_t *number, long val)
 {
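
As a usage sketch (hypothetical, not part of the patch, and assuming the atomic_read accessor already defined elsewhere in this header), the new CAS helpers slot into the usual read-then-retry loop:

    /* Hypothetical example of driving the new atomic_cas in a retry loop. */
    static inline void atomic_add_sketch(atomic_t *number, long amt)
    {
            long old;
            do {
                    old = atomic_read(number);   /* assumed existing accessor */
            } while (!atomic_cas(number, old, old + amt));
    }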
diff --git a/tools/compilers/gcc-glibc/binutils-2.21.1-riscv.patch b/tools/compilers/gcc-glibc/binutils-2.21.1-riscv.patch
index c355113..722a472 100644 (file)
@@ -19470,10 +19470,10 @@ index 0000000..3ab671f
 +#endif /* _ELF_RISCV_H */
 diff --git a/binutils-2.21.1/include/opcode/riscv-opc.h b/binutils-2.21.1/include/opcode/riscv-opc.h
 new file mode 100644
-index 0000000..7bad495
+index 0000000..8a94c8e
 --- /dev/null
 +++ binutils-2.21.1/include/opcode/riscv-opc.h
-@@ -0,0 +1,563 @@
+@@ -0,0 +1,571 @@
 +/* Automatically generated by parse-opcodes */
 +#define MATCH_MOVN 0x6f7
 +#define  MASK_MOVN 0x1ffff
@@ -19483,6 +19483,8 @@ index 0000000..7bad495
 +#define  MASK_REMUW 0x1ffff
 +#define MATCH_FMIN_D 0x180d3
 +#define  MASK_FMIN_D 0x1ffff
++#define MATCH_LR_W 0x1012b
++#define  MASK_LR_W 0x3fffff
 +#define MATCH_VLSTHU 0x128b
 +#define  MASK_VLSTHU 0x1ffff
 +#define MATCH_C_SWSP 0x8
@@ -19669,6 +19671,8 @@ index 0000000..7bad495
 +#define  MASK_MTFSR 0x3fffff
 +#define MATCH_VSSTH 0x108f
 +#define  MASK_VSSTH 0x1ffff
++#define MATCH_SC_W 0x1052b
++#define  MASK_SC_W 0x1ffff
 +#define MATCH_REM 0x733
 +#define  MASK_REM 0x1ffff
 +#define MATCH_SRLIW 0x29b
@@ -19731,6 +19735,8 @@ index 0000000..7bad495
 +#define  MASK_SRL 0x1ffff
 +#define MATCH_VENQCMD 0x2b7b
 +#define  MASK_VENQCMD 0xf801ffff
++#define MATCH_FSUB_D 0x10d3
++#define  MASK_FSUB_D 0x1f1ff
 +#define MATCH_VFMTS 0x1973
 +#define  MASK_VFMTS 0x1ffff
 +#define MATCH_VENQIMM1 0x2f7b
@@ -19817,8 +19823,8 @@ index 0000000..7bad495
 +#define  MASK_AMOADD_D 0x1ffff
 +#define MATCH_C_SW 0xd
 +#define  MASK_C_SW 0x1f
-+#define MATCH_AMOMAX_W 0x152b
-+#define  MASK_AMOMAX_W 0x1ffff
++#define MATCH_LR_D 0x101ab
++#define  MASK_LR_D 0x3fffff
 +#define MATCH_C_MOVE 0x2
 +#define  MASK_C_MOVE 0x801f
 +#define MATCH_FMOVN 0xef7
@@ -19923,6 +19929,8 @@ index 0000000..7bad495
 +#define  MASK_VFLSEGW 0x1ffff
 +#define MATCH_VLSEGSTH 0x88b
 +#define  MASK_VLSEGSTH 0xfff
++#define MATCH_AMOMAX_W 0x152b
++#define  MASK_AMOMAX_W 0x1ffff
 +#define MATCH_FSGNJ_D 0x50d3
 +#define  MASK_FSGNJ_D 0x1ffff
 +#define MATCH_VFLSEGSTW 0xd0b
@@ -19953,8 +19961,8 @@ index 0000000..7bad495
 +#define  MASK_FCVT_LU_D 0x3ff1ff
 +#define MATCH_VFLD 0x58b
 +#define  MASK_VFLD 0x3fffff
-+#define MATCH_FSUB_D 0x10d3
-+#define  MASK_FSUB_D 0x1f1ff
++#define MATCH_SC_D 0x105ab
++#define  MASK_SC_D 0x1ffff
 +#define MATCH_FMADD_S 0x43
 +#define  MASK_FMADD_S 0x1ff
 +#define MATCH_FCVT_W_S 0xa053
@@ -21758,10 +21766,10 @@ index 0000000..f890f07
 +}
 diff --git a/binutils-2.21.1/opcodes/riscv-opc.c b/binutils-2.21.1/opcodes/riscv-opc.c
 new file mode 100644
-index 0000000..a973ed6
+index 0000000..3d4410a
 --- /dev/null
 +++ binutils-2.21.1/opcodes/riscv-opc.c
-@@ -0,0 +1,462 @@
+@@ -0,0 +1,466 @@
 +/* mips-opc.c -- MIPS opcode list.
 +   Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
 +   2003, 2004, 2005, 2007, 2008, 2009  Free Software Foundation, Inc.
@@ -21890,6 +21898,10 @@ index 0000000..a973ed6
 +{"amomaxu.w",         "d,t,0(b)",     MATCH_AMOMAXU_W, MASK_AMOMAXU_W,         WR_xd|RD_xs1|RD_xs2 },
 +{"amomin.w",          "d,t,0(b)",     MATCH_AMOMIN_W, MASK_AMOMIN_W,   WR_xd|RD_xs1|RD_xs2 },
 +{"amominu.w",         "d,t,0(b)",     MATCH_AMOMINU_W, MASK_AMOMINU_W,         WR_xd|RD_xs1|RD_xs2 },
++{"lr.w",              "d,0(b)",       MATCH_LR_W, MASK_LR_W,   WR_xd|RD_xs1 },
++{"lr.d",              "d,0(b)",       MATCH_LR_D, MASK_LR_D,   WR_xd|RD_xs1 },
++{"sc.w",              "d,t,0(b)",     MATCH_SC_W, MASK_SC_W,   WR_xd|RD_xs1|RD_xs2 },
++{"sc.d",              "d,t,0(b)",     MATCH_SC_D, MASK_SC_D,   WR_xd|RD_xs1|RD_xs2 },
 +{"and",     "d,s,t",  MATCH_AND, MASK_AND,     WR_xd|RD_xs1|RD_xs2 },
 +{"and",    "d,s,j",   MATCH_ANDI, MASK_ANDI,   WR_xd|RD_xs1 },
 +{"andi",    "d,s,j",  MATCH_ANDI, MASK_ANDI,   WR_xd|RD_xs1 },
diff --git a/tools/compilers/gcc-glibc/gcc-4.6.1-riscv.patch b/tools/compilers/gcc-glibc/gcc-4.6.1-riscv.patch
index 2596ac6..aa24d37 100644 (file)
@@ -10462,10 +10462,10 @@ index 0000000..2c52f88
 +#define SWITCHABLE_TARGET 1
 diff --git a/gcc-4.6.1/gcc/config/riscv/riscv.md b/gcc-4.6.1/gcc/config/riscv/riscv.md
 new file mode 100644
-index 0000000..216153d
+index 0000000..bc2a772
 --- /dev/null
 +++ gcc-4.6.1/gcc/config/riscv/riscv.md
-@@ -0,0 +1,3042 @@
+@@ -0,0 +1,3045 @@
 +;;  Mips.md        Machine Description for MIPS based processors
 +;;  Copyright (C) 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
 +;;  1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
@@ -10905,6 +10905,9 @@ index 0000000..216153d
 +;; This attribute gives the format suffix for floating-point operations.
 +(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")])
 +
++;; This attribute gives the format suffix for atomic memory operations.
++(define_mode_attr amo [(SI "w") (DI "d")])
++
 +;; This attribute gives the upper-case mode name for one unit of a
 +;; floating-point mode.
 +(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF")])
@@ -13613,10 +13616,10 @@ index 0000000..bf19839
 +#define LINUX_DYNAMIC_LINKER64 GLIBC_DYNAMIC_LINKER64
 diff --git a/gcc-4.6.1/gcc/config/riscv/sync.md b/gcc-4.6.1/gcc/config/riscv/sync.md
 new file mode 100644
-index 0000000..14da636
+index 0000000..3455bd1
 --- /dev/null
 +++ gcc-4.6.1/gcc/config/riscv/sync.md
-@@ -0,0 +1,109 @@
+@@ -0,0 +1,92 @@
 +;;  Machine Description for MIPS based processor synchronization
 +;;  instructions.
 +;;  Copyright (C) 2007, 2008, 2009, 2010
@@ -13669,63 +13672,46 @@ index 0000000..14da636
 +  ""
 +  "fence")
 +
-+(define_insn "sync_<optab>di"
-+  [(set (match_operand:DI 0 "memory_operand" "+YR")
-+      (unspec_volatile:DI
-+          [(any_atomic:DI (match_dup 0)
-+                   (match_operand:DI 1 "register_operand" "d"))]
++(define_insn "sync_<optab><mode>"
++  [(set (match_operand:GPR 0 "memory_operand" "+YR")
++      (unspec_volatile:GPR
++          [(any_atomic:GPR (match_dup 0)
++                   (match_operand:GPR 1 "register_operand" "d"))]
 +       UNSPEC_SYNC_OLD_OP))]
-+  "TARGET_64BIT"
-+  "amo<insn>.d zero,%1,%0")
++  ""
++  "amo<insn>.<amo> zero,%1,%0")
 +
-+(define_insn "sync_old_<optab>di"
-+  [(set (match_operand:DI 0 "register_operand" "=&d")
-+      (match_operand:DI 1 "memory_operand" "+YR"))
++(define_insn "sync_old_<optab><mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&d")
++      (match_operand:GPR 1 "memory_operand" "+YR"))
 +   (set (match_dup 1)
-+      (unspec_volatile:DI
-+          [(any_atomic:DI (match_dup 1)
-+                   (match_operand:DI 2 "register_operand" "d"))]
-+       UNSPEC_SYNC_OLD_OP))]
-+  "TARGET_64BIT"
-+  "amo<insn>.d %0,%2,%1")
-+
-+(define_insn "sync_lock_test_and_setdi"
-+  [(set (match_operand:DI 0 "register_operand" "=&d")
-+      (match_operand:DI 1 "memory_operand" "+YR"))
-+   (set (match_dup 1)
-+      (unspec_volatile:DI [(match_operand:DI 2 "register_operand" "d")]
-+       UNSPEC_SYNC_EXCHANGE))]
-+  "TARGET_64BIT"
-+  "amoswap.d %0,%2,%1")
-+
-+(define_insn "sync_<optab>si"
-+  [(set (match_operand:SI 0 "memory_operand" "+YR")
-+      (unspec_volatile:SI
-+          [(any_atomic:SI (match_dup 0)
-+                   (match_operand:SI 1 "register_operand" "d"))]
++      (unspec_volatile:GPR
++          [(any_atomic:GPR (match_dup 1)
++                   (match_operand:GPR 2 "register_operand" "d"))]
 +       UNSPEC_SYNC_OLD_OP))]
 +  ""
-+  "amo<insn>.w zero,%1,%0")
++  "amo<insn>.<amo> %0,%2,%1")
 +
-+(define_insn "sync_old_<optab>si"
-+  [(set (match_operand:SI 0 "register_operand" "=&d")
-+      (match_operand:SI 1 "memory_operand" "+YR"))
++(define_insn "sync_lock_test_and_set<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&d")
++      (match_operand:GPR 1 "memory_operand" "+YR"))
 +   (set (match_dup 1)
-+      (unspec_volatile:SI
-+          [(any_atomic:SI (match_dup 1)
-+                   (match_operand:SI 2 "register_operand" "d"))]
-+       UNSPEC_SYNC_OLD_OP))]
++      (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "d")]
++       UNSPEC_SYNC_EXCHANGE))]
 +  ""
-+  "amo<insn>.w %0,%2,%1")
++  "amoswap.<amo> %0,%2,%1")
 +
-+(define_insn "sync_lock_test_and_setsi"
-+  [(set (match_operand:SI 0 "register_operand" "=&d")
-+      (match_operand:SI 1 "memory_operand" "+YR"))
++(define_insn "sync_compare_and_swap<mode>"
++  [(set (match_operand:GPR 0 "register_operand" "=&d")
++      (match_operand:GPR 1 "memory_operand" "+YR"))
 +   (set (match_dup 1)
-+      (unspec_volatile:SI [(match_operand:SI 2 "register_operand" "d")]
-+       UNSPEC_SYNC_EXCHANGE))]
++      (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "d")
++                            (match_operand:GPR 3 "reg_or_0_operand" "d")]
++       UNSPEC_COMPARE_AND_SWAP))
++   (clobber (match_scratch:GPR 4 "=d"))]
 +  ""
-+  "amoswap.w %0,%2,%1")
++  "1: lr.<amo> %0,%1; bne %0,%2,1f; sc.<amo> %4,%3,%1; bnez %4,1b; 1:"
++  [(set (attr "length") (const_int 16))])
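
With the GPR iterator and the new amo mode attribute added in riscv.md above, one template now serves both operand widths, and sync_compare_and_swap<mode> gives GCC an inline lr/sc loop instead of an out-of-line library call. A rough caller-side sketch of the effect (an assumption about what the compiler should select, not generated output):

    /* Hypothetical: the same builtins now map onto amoadd.w/amoadd.d and the
     * lr/sc CAS loop, chosen by operand mode (on rv64). */
    #include <stdint.h>

    void amo_sketch(int32_t *w, int64_t *d)
    {
            __sync_fetch_and_add(w, 1);             /* expect amoadd.w       */
            __sync_fetch_and_add(d, 1);             /* expect amoadd.d       */
            __sync_bool_compare_and_swap(w, 0, 1);  /* expect lr.w/sc.w loop */
    }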
 diff --git a/gcc-4.6.1/gcc/config/riscv/t-elf b/gcc-4.6.1/gcc/config/riscv/t-elf
 new file mode 100644
 index 0000000..4664de3
diff --git a/tools/compilers/gcc-glibc/glibc-2.14.1-riscv.patch b/tools/compilers/gcc-glibc/glibc-2.14.1-riscv.patch
index 1e92876..c210899 100644 (file)
@@ -111,10 +111,10 @@ index 0000000..a901452
 +#define ABORT_INSTRUCTION asm ("unimp")
 diff --git a/glibc-2.14.1/sysdeps/riscv/bits/atomic.h b/glibc-2.14.1/sysdeps/riscv/bits/atomic.h
 new file mode 100644
-index 0000000..9f7890e
+index 0000000..6711b84
 --- /dev/null
 +++ glibc-2.14.1/sysdeps/riscv/bits/atomic.h
-@@ -0,0 +1,177 @@
+@@ -0,0 +1,119 @@
 +/* Low-level functions for atomic operations. Mips version.
 +   Copyright (C) 2005 Free Software Foundation, Inc.
 +   This file is part of the GNU C Library.
@@ -155,75 +155,17 @@ index 0000000..9f7890e
 +typedef intmax_t atomic_max_t;
 +typedef uintmax_t uatomic_max_t;
 +
-+/* We have no compare and swap, so we acquire a global lock to emulate it.
-+   We assume no variable will be accessed using atomic.h macros from two
-+   different libraries.  */
-+
-+__make_section_unallocated
-+  (".gnu.linkonce.b.__riscv_atomic_lock, \"aw\", %nobits");
-+
-+volatile int __riscv_atomic_lock
-+  __attribute__ ((nocommon, section(".gnu.linkonce.b.__riscv_atomic_lock\n\t#"),
-+                visibility ("hidden")));
-+
-+#define __riscv_atomic_do_lock(addr) ({ \
-+  extern volatile int __riscv_atomic_lock;                                  \
-+  __riscv_atomic_do_lock24(&__riscv_atomic_lock);                   \
-+  __sync_synchronize(); })
-+
-+#define __riscv_atomic_do_unlock(addr) ({ \
-+  extern volatile int __riscv_atomic_lock;                                  \
-+  __sync_synchronize();                                                     \
-+  __riscv_atomic_lock = 0; })
-+
-+#define __riscv_atomic_do_lock24(addr) ({ \
-+  int __locked_val;                                                     \
-+  int __mask = 0xFF000000;                                              \
-+  while ((__locked_val = __sync_fetch_and_or(addr, __mask)) & __mask)   \
-+    while (*(volatile int*)(addr) & __mask)                           \
-+      ;                                                               \
-+  __locked_val; })
-+
-+/* The only basic operation needed is compare and exchange.  */
-+#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-+  ({ __typeof (mem) __acev_memp = (mem);                            \
-+     __typeof (*mem) __acev_ret;                                    \
-+     __typeof (*mem) __acev_newval = (newval);                              \
-+                                                                    \
-+     __riscv_atomic_do_lock (__acev_memp);                          \
-+     __acev_ret = *__acev_memp;                                             \
-+     if (__acev_ret == (oldval))                                    \
-+       *__acev_memp = __acev_newval;                                \
-+     __riscv_atomic_do_unlock (__acev_memp);                        \
-+     __acev_ret; })
-+
-+#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
-+  (atomic_compare_and_exchange_val_acq(mem, newval, oldval) != (oldval))
-+
-+/* Special versions, which guarantee that top 8 bits of all values
-+   are cleared and use those bits as the lock.  */
-+#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
-+  ({ __typeof (mem) __acev_memp = (mem);                            \
-+     __typeof (*mem) __acev_ret;                                    \
-+     __typeof (*mem) __acev_newval = (newval);                              \
-+                                                                    \
-+     __acev_ret = __riscv_atomic_do_lock24 (__acev_memp);           \
-+     if (__acev_ret == (oldval))                                    \
-+       *__acev_memp = __acev_newval & 0x00FFFFFF;                   \
-+     else                                                           \
-+       *__acev_memp = __acev_ret;                                   \
-+     __sync_synchronize();                                          \
-+     __acev_ret; })
-+
-+#define atomic_exchange_24_rel(mem, newval) \
-+  ({ __typeof (mem) __acev_memp = (mem);                            \
-+     __typeof (*mem) __acev_ret;                                    \
-+     __typeof (*mem) __acev_newval = (newval);                              \
-+                                                                    \
-+     __sync_synchronize();                                          \
-+     __acev_ret = __riscv_atomic_do_lock24 (__acev_memp);           \
-+     *__acev_memp = __acev_newval & 0x00FFFFFF;                       \
-+     __acev_ret; })
++/* Atomic compare and exchange. */
++
++#define atomic_compare_and_exchange_val_acq(mem, newval, oldval)         \
++  ({ __sync_synchronize();                      \
++     __sync_val_compare_and_swap(mem, oldval, newval); })
++
++#define atomic_compare_and_exchange_val_rel(mem, newval, oldval)         \
++  ({ typeof(*mem) __prev;                       \
++     __prev = __sync_val_compare_and_swap(mem, value);  \
++     __sync_synchronize();                      \
++     __prev; })
 +
 +/* Atomic exchange (without compare).  */
 +
diff --git a/user/parlib/include/riscv/atomic.h b/user/parlib/include/riscv/atomic.h
index 750842d..0aadbc6 100644 (file)
@@ -1,10 +1,6 @@
 #ifndef PARLIB_ARCH_ATOMIC_H
 #define PARLIB_ARCH_ATOMIC_H
 
-/* Unlike in x86, we need to include spinlocks in the user atomic ops file.
- * Since compare and swap isn't truely non-blocking, and we can't disable
- * interrupts in userspace, there is a slight chance of deadlock. */
-
 #include <ros/common.h>
 #include <ros/atomic.h>
 #include <ros/arch/membar.h>
@@ -99,39 +95,20 @@ static inline void atomic_orb(volatile uint8_t* number, uint8_t mask)
        __sync_fetch_and_or((uint32_t*)((uintptr_t)number & ~3), wmask);
 }
 
-asm (".section .gnu.linkonce.b.__riscv_ros_atomic_lock, \"aw\", %nobits\n"
-     "\t.previous");
-
-atomic_t __riscv_ros_atomic_lock
-  __attribute__ ((nocommon, section(".gnu.linkonce.b.__riscv_ros_atomic_lock\n\t#"),
-                  visibility ("hidden")));
-
 static inline bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
 {
-       bool retval = 0;
-       long temp;
-
-       if ((long)*addr != exp_val)
-               return 0;
-       if (atomic_swap(&__riscv_ros_atomic_lock, 1))
-               return 0;
-       if ((long)*addr == exp_val) {
-               atomic_swap(addr, new_val);
-               retval = 1;
-       }
-       atomic_set(&__riscv_ros_atomic_lock, 0);
-       return retval;
+       return __sync_bool_compare_and_swap(addr, exp_val, new_val);
 }
 
 static inline bool atomic_cas_ptr(void **addr, void *exp_val, void *new_val)
 {
-       return atomic_cas((atomic_t*)addr, (long)exp_val, (long)new_val);
+       return __sync_bool_compare_and_swap(addr, exp_val, new_val);
 }
 
 static inline bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val,
                                   uint32_t new_val)
 {
-       return atomic_cas((atomic_t*)addr, (long)exp_val, (long)new_val);
+       return __sync_bool_compare_and_swap(addr, exp_val, new_val);
 }
 
 static inline void atomic_or_int(volatile int *number, int mask)
diff --git a/user/parlib/riscv/vcore.S b/user/parlib/riscv/vcore.S
index 7d58718..7c55515 100644 (file)
@@ -3,7 +3,6 @@
 .abicalls
 
 .global __pop_ros_tf_regs
-.ent __pop_ros_tf_regs
 __pop_ros_tf_regs:
 
   REG_L s0,20*SZREG(a0)
@@ -21,10 +20,8 @@ __pop_ros_tf_regs:
   REG_L ra,33*SZREG(a0)
 
   jr    a3
-.end __pop_ros_tf_regs
 
 .global __save_ros_tf_regs
-.ent __save_ros_tf_regs
 __save_ros_tf_regs:
 
   REG_S s0,20*SZREG(a0)
@@ -42,4 +39,3 @@ __save_ros_tf_regs:
   REG_S ra,33*SZREG(a0)
 
   ret
-.end __save_ros_tf_regs