akaros/kern/arch/riscv/atomic.h
#pragma once

#include <ros/common.h>
#include <arch/arch.h>

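/* LR_P/SC_P name the load-reserved/store-conditional instructions matching
 * the width of long (and thus atomic_t): doubleword on RV64, word on RV32. */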
#ifdef __riscv64
# define LR_P "lr.d"
# define SC_P "sc.d"
#else
# define LR_P "lr.w"
# define SC_P "sc.w"
#endif

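/* Compare-and-swap: if *addr still holds exp_val, atomically replace it with
 * new_val.  Returns TRUE if the swap happened, FALSE otherwise. */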
static bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
{
  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}

static bool atomic_cas_ptr(void** addr, void* exp_val, void* new_val)
{
  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}

static bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val, uint32_t new_val)
{
  return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}
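
/* The usual way to use CAS is a read-modify-CAS retry loop.  A minimal sketch
 * (the counter and its update are hypothetical, for illustration only):
 *
 *      long old, new;
 *      do {
 *              old = atomic_read(&counter);
 *              new = old + 1;
 *      } while (!atomic_cas(&counter, old, new));
 */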

/* Init, read, and set are not read-modify-write ops.  They are plain volatile
 * accesses and are atomic only because aligned long loads and stores are
 * single instructions; they imply no memory ordering. */
static inline void atomic_init(atomic_t *number, long val)
{
  *(volatile long*)number = val;
}

static inline long atomic_read(atomic_t *number)
{
  return *(volatile long*)number;
}

static inline void atomic_set(atomic_t *number, long val)
{
  *(volatile long*)number = val;
}

/* Adds val to number, returning number's original value */
static inline long atomic_fetch_and_add(atomic_t *number, long val)
{
        return __sync_fetch_and_add((long*)number, val);
}

static inline void atomic_add(atomic_t *number, long val)
{
        atomic_fetch_and_add(number, val);
}

static inline void atomic_inc(atomic_t *number)
{
        atomic_add(number, 1);
}

static inline void atomic_dec(atomic_t *number)
{
        atomic_add(number, -1);
}

/* Adds val to number, so long as number was not zero.  Returns TRUE if the
 * operation succeeded (added, not zero), returns FALSE if number is zero. */
static inline bool atomic_add_not_zero(atomic_t *num, long inc)
{
        long res, tmp;
        asm volatile ("1:\n"
                      LR_P " %0, 0(%2)\n"     // tmp = *num; grab reservation
                      "li    %1, 1\n"         // res = 1
                      "beqz  %0, 2f\n"        // if (tmp == 0) goto fail
                      "add   %0, %0, %3\n"    // tmp += inc
                      SC_P " %1, %0, 0(%2)\n" // *num = tmp; res = 0 on success
                      "bnez  %1, 1b\n"        // if the SC failed, retry
                      "2:\n"
                      : "=&r"(tmp), "=&r"(res) : "r"(num), "r"(inc) : "memory");
        return res == 0;
}
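
/* A sketch of a typical caller, opportunistically taking a reference that may
 * already have dropped to zero (the names are hypothetical, for illustration
 * only):
 *
 *      if (!atomic_add_not_zero(&obj->refcnt, 1))
 *              return NULL;    // already at zero; don't touch the object
 */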

/* Subtracts val from number, returning TRUE if the new value is 0. */
static inline bool atomic_sub_and_test(atomic_t *number, long val)
{
        return __sync_fetch_and_sub((long*)number, val) == val;
}

static inline void atomic_and(atomic_t *number, long mask)
{
        __sync_fetch_and_and(number, mask);
}

static inline void atomic_or(atomic_t *number, long mask)
{
        __sync_fetch_and_or(number, mask);
}

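/* __sync_lock_test_and_set returns the old value, and on RISC-V GCC emits a
 * full atomic exchange (amoswap) for it, so despite its name it serves as a
 * plain swap here (hence the "yes, really" notes below). */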
static inline long atomic_swap(atomic_t *addr, long val)
{
        return (long)__sync_lock_test_and_set(addr, val); // yes, really
}

static inline void *atomic_swap_ptr(void **addr, void *val)
{
        return (void*)__sync_lock_test_and_set(addr, val);
}

static inline uint32_t atomic_swap_u32(uint32_t *addr, uint32_t val)
{
        return __sync_lock_test_and_set(addr, val); // yes, really
}

// RISC-V has atomic word ops, not byte ops, so we must manipulate addresses
static inline void atomic_andb(volatile uint8_t* number, uint8_t mask)
{
        uintptr_t offset = (uintptr_t)number & 3;
        // Build a word mask: the target byte holds mask, every other byte is
        // all-ones, so the AND leaves the neighboring bytes untouched.
        uint32_t bmask = (uint32_t)0xff << (8*offset);
        uint32_t wmask = ~bmask | ((uint32_t)mask << (8*offset));

        __sync_fetch_and_and((uint32_t*)((uintptr_t)number & ~3), wmask);
}

static inline void atomic_orb(volatile uint8_t* number, uint8_t mask)
{
        uintptr_t offset = (uintptr_t)number & 3;
        uint32_t wmask = (uint32_t)mask << (8*offset);

        __sync_fetch_and_or((uint32_t*)((uintptr_t)number & ~3), wmask);
}

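/* Simple test-and-set spinlock: bit 0 of rlock is the lock bit.  __spin_lock
 * spins read-only until the lock looks free, then tries to take it with an
 * atomic fetch-and-or (test-and-test-and-set), so waiters don't hammer the
 * cache line with atomics.  The mb() calls keep memory accesses in the
 * critical section from moving outside lock/unlock. */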
static inline bool spin_locked(spinlock_t* lock)
{
        return lock->rlock;
}

static inline bool __spin_trylock(spinlock_t *lock)
{
        return !__sync_fetch_and_or(&lock->rlock, 1);
}

static inline void __spin_lock(spinlock_t *lock)
{
        do
        {
                while (lock->rlock)
                        ;
        } while (!__spin_trylock(lock));
        mb();
}

static inline void __spin_unlock(spinlock_t *lock)
{
        mb();
        lock->rlock = 0;
}

static inline void __spinlock_init(spinlock_t *lock)
{
        lock->rlock = 0;
}