Optimizes run_once(_safe) (XCC)
[akaros.git] / kern / include / ros / common.h
#ifndef ROS_COMMON_H
#define ROS_COMMON_H

#ifndef __IVY__
#include <ros/noivy.h>
#endif

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdbool.h>

typedef uintptr_t physaddr_t;
typedef ssize_t intreg_t;
typedef size_t uintreg_t;

#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE    1
#endif

#ifndef FALSE
#define FALSE   0
#endif

#define CHECK_FLAG(flags,bit)   ((flags) & (1 << (bit)))
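/* Usage sketch (illustrative comment, not part of the original header):
 * testing a single bit in a flags word; mbi_flags is hypothetical:
 *
 *      if (CHECK_FLAG(mbi_flags, 3))
 *              ...;    // bit 3 is set
 */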

#define FOR_CIRC_BUFFER(next, size, var) \
        for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
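/* Usage sketch (illustrative comment, not part of the original header): walk
 * every slot of a circular buffer of 'size' entries, starting at index 'next'
 * and wrapping around; NR_SLOTS, buf, and process() are hypothetical:
 *
 *      FOR_CIRC_BUFFER(head, NR_SLOTS, i)
 *              process(buf[i]);
 */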

// Efficient min and max operations (each argument is evaluated only once)
#ifdef ROS_KERNEL /* Glibc has their own */
#define MIN(_a, _b)                                             \
({                                                              \
        typeof(_a) __a = (_a);                                  \
        typeof(_b) __b = (_b);                                  \
        __a <= __b ? __a : __b;                                 \
})
#define MAX(_a, _b)                                             \
({                                                              \
        typeof(_a) __a = (_a);                                  \
        typeof(_b) __b = (_b);                                  \
        __a >= __b ? __a : __b;                                 \
})
#endif
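/* Usage sketch (illustrative comment, not part of the original header): since
 * the arguments are copied into temporaries, each is evaluated exactly once;
 * len, off, x, and y are hypothetical:
 *
 *      size_t n = MIN(len, 4096 - off);
 *      size_t m = MAX(x++, y);         // x is incremented only once
 */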

// Rounding operations (efficient when n is a power of 2)
// Round down to the nearest multiple of n
#define ROUNDDOWN(a, n)                                         \
({                                                              \
        uintptr_t __a = (uintptr_t) (a);                        \
        (typeof(a)) (__a - __a % (n));                          \
})
// Round up to the nearest multiple of n
#define ROUNDUP(a, n)                                           \
({                                                              \
        uintptr_t __n = (uintptr_t) (n);                        \
        (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n));        \
})
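/* Worked example (illustrative comment, not part of the original header),
 * aligning an address to a hypothetical 4 KiB page:
 *
 *      ROUNDDOWN(0x12345, 4096)        // 0x12000
 *      ROUNDUP(0x12345, 4096)          // 0x13000
 *      ROUNDUP(0x12000, 4096)          // 0x12000 (already aligned)
 */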

// Round pointer down to the nearest multiple of n
#define PTRROUNDDOWN(a, n)                                      \
({                                                              \
        char * __a = (char *) (a);                              \
        (typeof(a)) (__a - (uintptr_t)__a % (n));               \
})
// Round pointer up to the nearest multiple of n
#define PTRROUNDUP(a, n)                                        \
({                                                              \
        uintptr_t __n = (uintptr_t) (n);                        \
        (typeof(a)) (PTRROUNDDOWN((char *) (a) + __n - 1, __n));        \
})
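/* Usage sketch (illustrative comment, not part of the original header):
 * aligning a buffer pointer up to an 8-byte boundary; p is hypothetical:
 *
 *      char *aligned = PTRROUNDUP(p, 8);       // smallest 8-aligned address >= p
 */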

// Return the integer logarithm of the value provided rounded down
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
        uintptr_t l = 0;
        while ((value >> l) > 1)
                ++l;
        return l;
}

// Return the integer logarithm of the value provided rounded up
static inline uintptr_t LOG2_UP(uintptr_t value)
{
        uintptr_t _v = LOG2_DOWN(value);
        /* shift a uintptr_t-wide 1 so the test is defined for the high bits */
        if (value ^ ((uintptr_t) 1 << _v))
                return _v + 1;
        else
                return _v;
}

static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
        return (uintptr_t) 1 << LOG2_UP(value);
}
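/* Worked examples (illustrative comment, not part of the original header):
 *
 *      LOG2_DOWN(5)    == 2    // floor(log2(5))
 *      LOG2_UP(5)      == 3    // ceil(log2(5))
 *      LOG2_UP(8)      == 3    // exact power of two
 *      ROUNDUPPWR2(5)  == 8    // next power of two at or above 5
 */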

/* We wrap around if UINT64_MAX < a * b, which is also UINT64_MAX / a < b. */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
        if (!a)
                return FALSE;
        return (uint64_t)(-1) / a < b;
}
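/* Usage sketch (illustrative comment, not part of the original header):
 *
 *      mult_will_overflow_u64(1ULL << 32, 1ULL << 32)  // TRUE: product is 2^64
 *      mult_will_overflow_u64(1ULL << 32, 1ULL << 31)  // FALSE: product fits
 *      mult_will_overflow_u64(0, b)                    // FALSE: 0 * b never wraps
 */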

// Return the offset of 'member' relative to the beginning of a struct type
#ifndef offsetof
#define offsetof(type, member)  ((size_t) (&((type*)0)->member))
#endif

/* Return the container/struct holding the object 'ptr' points to */
#define container_of(ptr, type, member) ({                                     \
        (type*)((char*)(ptr) - offsetof(type, member));                        \
})
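/* Usage sketch (illustrative comment, not part of the original header):
 * recover a hypothetical enclosing struct from a pointer to one of its
 * members:
 *
 *      struct foo { int refcnt; struct bar link; };
 *      struct bar *b = ...;
 *      struct foo *f = container_of(b, struct foo, link);
 */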

// Ivy currently can only handle 63 bits (OCaml thing), so use this to make
// a uint64_t programmatically
#define UINT64(upper, lower) ( (((uint64_t)(upper)) << 32) | (lower) )
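/* Usage sketch (illustrative comment, not part of the original header):
 *
 *      UINT64(0xdeadbeef, 0xcafebabe)  // 0xdeadbeefcafebabe
 */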

/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin til the func is complete. */
#define run_once(func)                                                         \
{                                                                              \
        static bool ran_once = FALSE;                                          \
        static atomic_t is_running = FALSE;                                    \
        if (!ran_once) {                                                       \
                if (!atomic_swap(&is_running, TRUE)) {                         \
                        /* we won the race and get to run the func */          \
                        func;                                                  \
                        wmb();  /* don't let the ran_once write pass previous writes */ \
                        ran_once = TRUE;                                       \
                } else {                                                       \
                        /* someone else won, wait til they are done to break out */ \
                        while (!ran_once)                                      \
                                cpu_relax();                                   \
                }                                                              \
        }                                                                      \
}
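/* Usage sketch (illustrative comment, not part of the original header): an
 * init path that several cores may enter but that must set up state only
 * once; setup_foo() and foo_init() are hypothetical, and atomic_swap/wmb/
 * cpu_relax come from the kernel's atomic headers:
 *
 *      void foo_init(void)
 *      {
 *              run_once(setup_foo());
 *              // every caller returns only after setup_foo() has completed
 *      }
 */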

/* Unprotected, single-threaded version, makes sure func is run exactly once */
#define run_once_racy(func)                                                    \
{                                                                              \
        static bool ran_once = FALSE;                                          \
        if (!ran_once) {                                                       \
                func;                                                          \
                ran_once = TRUE;                                               \
        }                                                                      \
}

/* Aborts with 'retcmd' if this function has already been called.  Compared to
 * run_once, this is put at the top of a function that can be called from
 * multiple sources but should only execute once. */
#define init_once_racy(retcmd)                                                 \
{                                                                              \
        static bool initialized = FALSE;                                       \
        if (initialized) {                                                     \
                retcmd;                                                        \
        }                                                                      \
        initialized = TRUE;                                                    \
}
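/* Usage sketch (illustrative comment, not part of the original header): bail
 * out of later calls at the top of a single-threaded init function;
 * console_init() is hypothetical:
 *
 *      void console_init(void)
 *      {
 *              init_once_racy(return);
 *              ...     // runs only on the first call
 *      }
 */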

#endif /* ROS_COMMON_H */