/* kern/arch/riscv/atomic.h */
#ifndef ROS_INCLUDE_ATOMIC_H
#define ROS_INCLUDE_ATOMIC_H

#include <ros/common.h>
#include <arch/arch.h>

typedef void* atomic_t;
struct spinlock {
        volatile uint32_t rlock;
};
typedef struct spinlock spinlock_t;
#define SPINLOCK_INITIALIZER {0}

/* Atomically sets *addr to new_val iff *addr still equals exp_val.  Returns
 * TRUE if the swap happened, FALSE otherwise. */
bool atomic_comp_swap(uintptr_t* addr, uintptr_t exp_val, uintptr_t new_val);

static inline void atomic_init(atomic_t* number, uintptr_t val)
{
        *(uintptr_t*)number = val;
}

static inline uintptr_t atomic_read(atomic_t* number)
{
        return *(uintptr_t*)number;
}

static inline void atomic_set(atomic_t* number, uintptr_t val)
{
        *(uintptr_t*)number = val;
}

/* Adds val to number, returning number's original value */
static inline uintptr_t atomic_fetch_and_add(atomic_t* number, uintptr_t val)
{
        return __sync_fetch_and_add((uintptr_t*)number, val);
}

static inline void atomic_add(atomic_t* number, uintptr_t val)
{
        atomic_fetch_and_add(number, val);
}

static inline void atomic_inc(atomic_t* number)
{
        atomic_add(number, 1);
}

static inline void atomic_dec(atomic_t* number)
{
        atomic_add(number, -1);
}
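
/* Illustrative sketch, not part of this header: a hypothetical event counter
 * built on the ops above ('nr_events', 'count_event', and 'events_so_far' are
 * invented names).
 *
 *      static atomic_t nr_events;
 *
 *      static void count_event(void)
 *      {
 *              atomic_inc(&nr_events);
 *      }
 *
 *      static uintptr_t events_so_far(void)
 *      {
 *              return atomic_read(&nr_events);
 *      }
 */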

/* Adds val to number, so long as number was not zero.  Returns TRUE if the
 * operation succeeded (added, not zero), returns FALSE if number is zero. */
static inline bool atomic_add_not_zero(atomic_t* number, uintptr_t val)
{
        uintptr_t old_num, new_num;
        do {
                old_num = atomic_read(number);
                if (!old_num)
                        return FALSE;
                new_num = old_num + val;
        } while (!atomic_comp_swap((uintptr_t*)number, old_num, new_num));
        return TRUE;
}
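
/* Illustrative sketch, not part of this header: atomic_add_not_zero() is how
 * one would take a reference only while someone else still holds one
 * ('struct foo' and friends are invented names).
 *
 *      struct foo {
 *              atomic_t refcnt;
 *      };
 *
 *      static bool foo_incref_not_zero(struct foo *f)
 *      {
 *              return atomic_add_not_zero(&f->refcnt, 1);
 *      }
 */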

/* Subtracts val from number, returning TRUE if the new value is 0. */
static inline bool atomic_sub_and_test(atomic_t* number, uintptr_t val)
{
        return __sync_fetch_and_sub((uintptr_t*)number, val) == val;
}
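
/* Illustrative sketch, continuing the invented refcount example above: the
 * release side frees the object once the count reaches zero ('foo_free' is an
 * invented helper).
 *
 *      static void foo_decref(struct foo *f)
 *      {
 *              if (atomic_sub_and_test(&f->refcnt, 1))
 *                      foo_free(f);
 *      }
 */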

static inline void atomic_and(atomic_t *number, uintptr_t mask)
{
        __sync_fetch_and_and((uintptr_t*)number, mask);
}

static inline void atomic_or(atomic_t* number, uintptr_t mask)
{
        __sync_fetch_and_or((uintptr_t*)number, mask);
}

static inline uintptr_t atomic_swap(uintptr_t* addr, uintptr_t val)
{
        return __sync_lock_test_and_set(addr, val); // yes, really
}
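
/* Despite its name, __sync_lock_test_and_set() is GCC's atomic-exchange
 * builtin, hence the full swap above.  Illustrative sketch with invented
 * names: atomically claiming a pending pointer and clearing the slot.
 *
 *      uintptr_t pending_work;         // holds a pointer, set by producers
 *
 *      static struct work *take_pending(void)
 *      {
 *              return (struct work*)atomic_swap(&pending_work, 0);
 *      }
 */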

// RISC-V has atomic word ops, not byte ops, so we must manipulate addresses
static inline void atomic_andb(volatile uint8_t* number, uint8_t mask)
{
        uintptr_t offset = (uintptr_t)number & 3;
        // 0xff << (8*offset) avoids the undefined 1 << 32 shift when offset == 3
        uint32_t wmask = (uint32_t)0xff << (8*offset);
        wmask = ~wmask | ((uint32_t)mask << (8*offset));

        __sync_fetch_and_and((uint32_t*)((uintptr_t)number & ~3), wmask);
}

static inline void atomic_orb(volatile uint8_t* number, uint8_t mask)
{
        uintptr_t offset = (uintptr_t)number & 3;
        uint32_t wmask = (uint32_t)mask << (8*offset);

        __sync_fetch_and_or((uint32_t*)((uintptr_t)number & ~3), wmask);
}
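
/* Worked example of the masking above (illustrative): for a byte at word
 * offset 1 with mask 0x0f,
 *      atomic_orb:  wmask = 0x00000f00, OR'd into the containing word;
 *      atomic_andb: wmask = 0xffff0fff, AND'd into the containing word;
 * so only the addressed byte is affected. */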

static inline uint32_t spin_locked(spinlock_t* lock)
{
        return lock->rlock;
}

/* Returns the previous lock value; 0 means we got the lock. */
static inline uint32_t spin_trylock(spinlock_t* lock)
{
        return __sync_fetch_and_or(&lock->rlock, 1);
}

/* Test-and-test-and-set: spin on plain reads until the lock looks free, then
 * retry the atomic test-and-set. */
static inline void spin_lock(spinlock_t *lock)
{
        while (spin_trylock(lock))
                while (lock->rlock);
}

static inline void spin_unlock(spinlock_t *lock)
{
        /* Need to prevent the compiler (and some arches) from reordering older
         * stores */
        wmb();
        lock->rlock = 0;
}

static inline void spinlock_init(spinlock_t *lock)
{
        lock->rlock = 0;
}
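
/* Illustrative sketch, not part of this header: typical lock usage ('foo_lock'
 * and the protected state are invented).
 *
 *      static spinlock_t foo_lock = SPINLOCK_INITIALIZER;
 *
 *      static void foo_update(void)
 *      {
 *              spin_lock(&foo_lock);
 *              // ... touch state protected by foo_lock ...
 *              spin_unlock(&foo_lock);
 *      }
 */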

static inline void spinlock_debug(spinlock_t* lock)
{
        /* No debug state is tracked for this arch's spinlocks. */
}

#endif /* !ROS_INCLUDE_ATOMIC_H */