Use builtin_clz log2 for x86 architectures.
[akaros.git] / kern/include/ros/common.h
#ifndef ROS_COMMON_H
#define ROS_COMMON_H

#ifndef __ASSEMBLER__
#ifndef __IVY__
#include <ros/noivy.h>
#endif

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdbool.h>

typedef uintptr_t physaddr_t;
typedef long intreg_t;
typedef unsigned long uintreg_t;

#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE    1
#endif

#ifndef FALSE
#define FALSE   0
#endif

#define KiB     1024u
#define MiB     1048576u
#define GiB     1073741824u
#define TiB     1099511627776ull
#define PiB     1125899906842624ull
#define EiB     1152921504606846976ull

/* Test for alignment, e.g. 2^6 */
#define ALIGNED(p, a)   (!(((uintptr_t)(p)) & ((a)-1)))
/* Aligns x up to the mask, e.g. (2^6 - 1) */
#define __ALIGN_MASK(x, mask)       (((x) + (mask)) & ~(mask))
/* Will return false for 0.  Debatable, based on what you want. */
#define IS_PWR2(x) ((x) && !((x) & ((x) - 1)))
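/* Illustrative examples of the macros above (values chosen arbitrarily):
 *   ALIGNED(0x1000, 64)        -> true (0x1000 is a multiple of 64)
 *   __ALIGN_MASK(100, 64 - 1)  -> 128 (rounds 100 up to the next 64)
 *   IS_PWR2(64)                -> true; IS_PWR2(0) and IS_PWR2(96) -> false */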

#define ARRAY_SIZE(x) (sizeof((x))/sizeof((x)[0]))

#define CHECK_FLAG(flags,bit)   ((flags) & (1 << (bit)))

#define FOR_CIRC_BUFFER(next, size, var) \
        for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
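/* Illustrative use of FOR_CIRC_BUFFER, assuming a hypothetical ring buffer
 * 'buf' of BUF_SZ entries whose next slot of interest is 'head':
 *
 *   FOR_CIRC_BUFFER(head, BUF_SZ, i)
 *           process(buf[i]);
 *
 * 'i' walks BUF_SZ slots starting at 'head', wrapping modulo BUF_SZ. */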

#define STRINGIFY(s) __STRINGIFY(s)
#define __STRINGIFY(s) #s

/* A macro for testing if another macro has been #defined or not.  Can be used
 * wherever you need a boolean defined.  Returns 0 or 1. */
#define is_defined(macro) is_defined_(macro)
#define is_defined_test_1 ,
#define is_defined_(value) is_defined__(is_defined_test_##value, value)
#define is_defined__(comma, value) is_defined___(comma 1, 0)
#define is_defined___(_, v, ...) v
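/* Illustrative example (CONFIG_FOO is a hypothetical name): for a macro
 * defined to 1, e.g. '#define CONFIG_FOO 1', is_defined(CONFIG_FOO) expands
 * to 1; if CONFIG_FOO is not defined at all, it expands to 0.  It therefore
 * works both in #if directives and in ordinary C expressions. */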

// Efficient min and max operations
#ifdef ROS_KERNEL /* Glibc has their own */
#define MIN(_a, _b)                                             \
({                                                              \
        typeof(_a) __a = (_a);                                  \
        typeof(_b) __b = (_b);                                  \
        __a <= __b ? __a : __b;                                 \
})
#define MAX(_a, _b)                                             \
({                                                              \
        typeof(_a) __a = (_a);                                  \
        typeof(_b) __b = (_b);                                  \
        __a >= __b ? __a : __b;                                 \
})
#endif

/* Rounding operations (efficient when n is a power of 2).
 * ROUNDDOWN rounds down to the nearest multiple of n.
 * The compiler should compile out the branch.  The branch is needed on 32 bit
 * so that we can round down a uint64_t without chopping off the top 32 bits. */
#define ROUNDDOWN(a, n)                                                        \
({                                                                             \
        typeof(a) __b;                                                         \
        if (sizeof(a) == 8) {                                                  \
                uint64_t __a = (uint64_t) (a);                                 \
                __b = (typeof(a)) (__a - __a % (n));                           \
        } else {                                                               \
                uintptr_t __a = (uintptr_t) (a);                               \
                __b = (typeof(a)) (__a - __a % (n));                           \
        }                                                                      \
        __b;                                                                   \
})

/* Round up to the nearest multiple of n */
#define ROUNDUP(a, n)                                                          \
({                                                                             \
        typeof(a) __b;                                                         \
        if (sizeof(a) == 8) {                                                  \
                uint64_t __n = (uint64_t) (n);                                 \
                __b = (typeof(a)) (ROUNDDOWN((uint64_t) (a) + __n - 1, __n));  \
        } else {                                                               \
                uintptr_t __n = (uintptr_t) (n);                               \
                __b = (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n)); \
        }                                                                      \
        __b;                                                                   \
})

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
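/* Illustrative values (4096 standing in for a page size):
 *   ROUNDDOWN(0x12345, 4096)  -> 0x12000
 *   ROUNDUP(0x12345, 4096)    -> 0x13000
 *   DIV_ROUND_UP(10, 4)       -> 3
 * Note that DIV_ROUND_UP evaluates 'd' twice, so avoid side effects there. */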

// Return the integer logarithm of the value provided, rounded down
#if __x86_64__ || __i386__
// Should be __has_builtin(__builtin_clz), but gcc doesn't support it
// x86 compiler intrinsic supported by both gcc > 4.6 and LLVM > 1.5
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
        value |= 1;  // treat 0 as 1; __builtin_clz(0) is undefined
        return (sizeof(value) == 8) ? 63 - __builtin_clzll(value)
                                    : 31 - __builtin_clz(value);
}
#else
// TODO(gvdl): Does the gcc on riscv support __builtin_clz?
#warning "Using loop-based LOG2_DOWN, no __builtin_clz?"
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
        uintptr_t l = 0;
        while ((value >> l) > 1)
                ++l;
        return l;
}
#endif

// Return the integer logarithm of the value provided, rounded up
static inline uintptr_t LOG2_UP(uintptr_t value)
{
        uintptr_t ret = LOG2_DOWN(value);
        ret += 0 != (value ^ ((uintptr_t) 1 << ret));  // add 1 if any lower bit is set
        return ret;
}
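/* Illustrative values for the helpers above:
 *   LOG2_DOWN(1) == 0, LOG2_DOWN(64) == 6, LOG2_DOWN(100) == 6
 *   LOG2_UP(64)  == 6, LOG2_UP(65)   == 7
 * Note LOG2_DOWN(0) returns 0 rather than being undefined. */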

static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
        return (uintptr_t) 1 << LOG2_UP(value);
}

static inline uintptr_t ROUNDDOWNPWR2(uintptr_t value)
{
        return (uintptr_t) 1 << LOG2_DOWN(value);
}
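/* Illustrative values:
 *   ROUNDUPPWR2(100)   == 128, ROUNDUPPWR2(128)   == 128
 *   ROUNDDOWNPWR2(100) == 64,  ROUNDDOWNPWR2(128) == 128 */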

/* We wrap around if UINT64_MAX < a * b, which is also UINT64_MAX / a < b. */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
        if (!a)
                return FALSE;
        return (uint64_t)(-1) / a < b;
}
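/* Example: guard a hypothetical size computation before multiplying, where
 * 'nr' and 'elem_sz' are caller-provided uint64_t values:
 *
 *   if (mult_will_overflow_u64(nr, elem_sz))
 *           return -1;
 *   uint64_t bytes = nr * elem_sz;
 */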

// Return the offset of 'member' relative to the beginning of a struct type
#ifndef offsetof
#define offsetof(type, member)  ((size_t) (&((type*)0)->member))
#endif

/* Return the container/struct holding the object 'ptr' points to */
#define container_of(ptr, type, member) ({                                     \
        (type*)((char*)(ptr) - offsetof(type, member));                        \
})
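/* Example (hypothetical struct): with 'struct foo { int x; int link; };' and
 * 'int *p' pointing at some foo's 'link' member,
 *
 *   struct foo *f = container_of(p, struct foo, link);
 *
 * recovers a pointer to the enclosing struct foo. */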

/* Force exactly one read of x.  You may still need memory barriers (mb()).
 * See http://lwn.net/Articles/508991/ for more info. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
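/* Example: re-read a flag another core may set, without letting the compiler
 * cache it in a register ('flag' is a hypothetical shared variable):
 *
 *   while (!ACCESS_ONCE(flag))
 *           cpu_relax();
 */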

/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin until the func is complete. */
#define run_once(func)                                                         \
{                                                                              \
        static bool ran_once = FALSE;                                          \
        static atomic_t is_running = FALSE;                                    \
        if (!ran_once) {                                                       \
                if (!atomic_swap(&is_running, TRUE)) {                         \
                        /* we won the race and get to run the func */          \
                        func;                                                  \
                        wmb();  /* don't let the ran_once write pass previous writes */ \
                        ran_once = TRUE;                                       \
                } else {                                                       \
                        /* someone else won, wait til they are done to break out */ \
                        while (!ran_once)                                      \
                                cpu_relax();                                   \
                }                                                              \
        }                                                                      \
}
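/* Example: lazily initialize shared state from any number of callers; only
 * one caller runs the hypothetical init_foo(), the rest spin until it's done:
 *
 *   run_once(init_foo());
 */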

/* Unprotected, single-threaded version, makes sure func is run exactly once */
#define run_once_racy(func)                                                    \
{                                                                              \
        static bool ran_once = FALSE;                                          \
        if (!ran_once) {                                                       \
                func;                                                          \
                ran_once = TRUE;                                               \
        }                                                                      \
}

/* Aborts with 'retcmd' if this function has already been called.  Compared to
 * run_once, this is put at the top of a function that can be called from
 * multiple sources but should only execute once. */
#define init_once_racy(retcmd)                                                 \
{                                                                              \
        static bool initialized = FALSE;                                       \
        if (initialized) {                                                     \
                retcmd;                                                        \
        }                                                                      \
        initialized = TRUE;                                                    \
}
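/* Example: at the top of a hypothetical foo_init() reachable from several
 * boot paths, return immediately on every call after the first:
 *
 *   void foo_init(void)
 *   {
 *           init_once_racy(return);
 *           // ...actual one-time setup...
 *   }
 */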

#endif /* __ASSEMBLER__ */

#endif /* ROS_COMMON_H */