Initial commit of RISC-V architecture port
[akaros.git] / kern/arch/riscv/atomic.h
#ifndef ROS_INCLUDE_ATOMIC_H
#define ROS_INCLUDE_ATOMIC_H

#include <ros/common.h>
#include <arch/arch.h>

typedef void* atomic_t;
struct spinlock {
	volatile uint32_t rlock;
};
typedef struct spinlock spinlock_t;
#define SPINLOCK_INITIALIZER {0}

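/* Compare-and-swap: if *addr == exp_val, atomically store new_val there and
 * return TRUE; otherwise return FALSE.  See the retry loop in
 * atomic_add_not_zero below for the intended usage. */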
bool atomic_comp_swap(uintptr_t* addr, uintptr_t exp_val, uintptr_t new_val);

static inline void atomic_init(atomic_t* number, uintptr_t val)
{
	*(uintptr_t*)number = val;
}

static inline uintptr_t atomic_read(atomic_t* number)
{
	return *(uintptr_t*)number;
}

static inline void atomic_set(atomic_t* number, uintptr_t val)
{
	*(uintptr_t*)number = val;
}

/* Adds val to number, returning number's original value */
static inline uintptr_t atomic_fetch_and_add(atomic_t* number, uintptr_t val)
{
	return __sync_fetch_and_add((uintptr_t*)number, val);
}

static inline void atomic_add(atomic_t* number, uintptr_t val)
{
	atomic_fetch_and_add(number, val);
}

static inline void atomic_inc(atomic_t* number)
{
	atomic_add(number, 1);
}

static inline void atomic_dec(atomic_t* number)
{
	atomic_add(number, -1);
}

/* Adds val to number, so long as number was not zero.  Returns TRUE if the
 * operation succeeded (added, not zero), returns FALSE if number is zero. */
static inline bool atomic_add_not_zero(atomic_t* number, uintptr_t val)
{
	uintptr_t old_num, new_num;
	do {
		old_num = atomic_read(number);
		if (!old_num)
			return FALSE;
		new_num = old_num + val;
	} while (!atomic_comp_swap((uintptr_t*)number, old_num, new_num));
	return TRUE;
}
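
/* Typical use (hypothetical caller): take a reference only while the object
 * is still live, e.g.
 *	if (!atomic_add_not_zero(&obj->refcnt, 1))
 *		return;	// refcnt already hit zero; object is going away
 */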

/* Subtracts val from number, returning TRUE if the new value is 0. */
static inline bool atomic_sub_and_test(atomic_t* number, uintptr_t val)
{
	return __sync_fetch_and_sub((uintptr_t*)number, val) == val;
}

static inline void atomic_or(atomic_t* number, uintptr_t mask)
{
	__sync_fetch_and_or((uintptr_t*)number, mask);
}

static inline uintptr_t atomic_swap(uintptr_t* addr, uintptr_t val)
{
	// yes, really: despite its name, this GCC builtin is an atomic exchange
	return __sync_lock_test_and_set(addr, val);
}
81
82 // RISC-V has atomic word ops, not byte ops, so we must manipulate addresses
83 static inline void atomic_andb(volatile uint8_t* number, uint8_t mask)
84 {
85         uintptr_t offset = (uintptr_t)number & 3;
86         uint32_t wmask = (1<<(8*offset+8)) - (1<<(8*offset));
87         wmask = ~wmask | ((uint32_t)mask << (8*offset));
88
89         __sync_fetch_and_and((uint32_t*)((uintptr_t)number & ~3), wmask);
90 }
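
/* Worked example: with number at offset 3 in its word and mask == 0x7f, wmask
 * is ~0xff000000 | 0x7f000000 == 0x7fffffff, so the AND clears only the top
 * bit of that byte and leaves the other three bytes of the word intact. */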

static inline void atomic_orb(volatile uint8_t* number, uint8_t mask)
{
	uintptr_t offset = (uintptr_t)number & 3;
	uint32_t wmask = (uint32_t)mask << (8*offset);

	__sync_fetch_and_or((uint32_t*)((uintptr_t)number & ~3), wmask);
}

static inline uint32_t spin_locked(spinlock_t* lock)
{
	return lock->rlock;
}

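/* Test-and-test-and-set: contended waiters spin on a plain load until the
 * lock looks free, rather than hammering the interconnect with atomic ops.
 * __sync_fetch_and_or is a full barrier, so acquiring the lock also orders
 * the critical section's accesses after it. */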
static inline void __spin_lock(volatile uint32_t* rlock)
{
	while (__sync_fetch_and_or(rlock, 1))
		while (*rlock);
}

static inline void spin_lock(spinlock_t *lock)
{
	__spin_lock(&lock->rlock);
}

static inline void spin_unlock(spinlock_t *lock)
{
	/* Need to prevent the compiler (and some arches) from reordering older
	 * stores */
	wmb();
	lock->rlock = 0;
}
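
/* Typical usage (hypothetical caller):
 *
 *	static spinlock_t foo_lock = SPINLOCK_INITIALIZER;
 *
 *	spin_lock(&foo_lock);
 *	... critical section ...
 *	spin_unlock(&foo_lock);
 */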

static inline void spinlock_init(spinlock_t *lock)
{
	lock->rlock = 0;
}

/* Lock debugging is not implemented for RISC-V yet; this is a stub. */
static inline void spinlock_debug(spinlock_t* lock)
{
}

#endif /* !ROS_INCLUDE_ATOMIC_H */