#ifndef ROS_COMMON_H
#define ROS_COMMON_H

#ifndef __ASSEMBLER__

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdbool.h>
#include <ros/compiler.h>

typedef uintptr_t physaddr_t;
typedef long intreg_t;
typedef unsigned long uintreg_t;

#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE	1
#endif

#ifndef FALSE
#define FALSE	0
#endif

#define FOR_CIRC_BUFFER(next, size, var) \
	for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
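/* Illustrative use (names are hypothetical, not from this header): iterate
 * over 'size' slots of a ring buffer, starting at index 'next':
 *	FOR_CIRC_BUFFER(head, NR_SLOTS, i)
 *		handle_slot(buf[i]);
 */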

#define STRINGIFY(s) __STRINGIFY(s)
#define __STRINGIFY(s) #s

/* A macro for testing whether another macro is defined (to 1) or not.  Can be
 * used wherever you need a boolean expression.  Expands to 0 or 1. */
#define is_defined(macro) is_defined_(macro)
#define is_defined_test_1 ,
#define is_defined_(value) is_defined__(is_defined_test_##value, value)
#define is_defined__(comma, value) is_defined___(comma 1, 0)
#define is_defined___(_, v, ...) v
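/* Illustrative use (CONFIG_FOO and foo_init are hypothetical): is_defined()
 * expands to 1 for a macro defined to 1 and to 0 for an undefined macro, so
 * unlike #ifdef it works inside ordinary C expressions:
 *	#define CONFIG_FOO 1
 *
 *	if (is_defined(CONFIG_FOO))
 *		foo_init();
 */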

// Efficient min and max operations
#ifdef ROS_KERNEL /* Glibc or other user libs have their own */
#define MIN(_a, _b)                                             \
({                                                              \
	typeof(_a) __a = (_a);                                  \
	typeof(_b) __b = (_b);                                  \
	__a <= __b ? __a : __b;                                 \
})
#define MAX(_a, _b)                                             \
({                                                              \
	typeof(_a) __a = (_a);                                  \
	typeof(_b) __b = (_b);                                  \
	__a >= __b ? __a : __b;                                 \
})

/* Other kernel-only includes */

/* Test for alignment, e.g. 2^6 */
#define ALIGNED(p, a)	(!(((uintptr_t)(p)) & ((a)-1)))
/* Aligns x up to the mask, e.g. (2^6 - 1) (round up if any mask bits are set)*/
#define __ALIGN_MASK(x, mask) (((uintptr_t)(x) + (mask)) & ~(mask))
/* Aligns x up to the alignment, e.g. 2^6. */
#define ALIGN(x, a) ((typeof(x)) __ALIGN_MASK(x, (a) - 1))
/* Will return false for 0.  Debatable, based on what you want. */
#define IS_PWR2(x) ((x) && !((x) & ((x) - 1)))
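/* Worked examples (illustrative), with an alignment of 64 (2^6):
 *	ALIGNED(0x1040, 64) == 1, ALIGNED(0x1044, 64) == 0
 *	ALIGN(0x1044, 64) == 0x1080
 *	IS_PWR2(64) == 1, IS_PWR2(96) == 0, IS_PWR2(0) == 0
 */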

#define ARRAY_SIZE(x) (sizeof((x))/sizeof((x)[0]))

#endif

/* Rounding operations (efficient when n is a power of 2)
 * Round down to the nearest multiple of n.
 * The compiler should compile out the branch.  This is needed for 32 bit, so
 * that we can round down a uint64_t without chopping off the top 32 bits. */
#define ROUNDDOWN(a, n)                                                        \
({                                                                             \
	typeof(a) __b;                                                         \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __a = (uint64_t) (a);                                 \
		__b = (typeof(a)) (__a - __a % (n));                           \
	} else {                                                               \
		uintptr_t __a = (uintptr_t) (a);                               \
		__b = (typeof(a)) (__a - __a % (n));                           \
	}                                                                      \
	__b;                                                                   \
})

/* Round up to the nearest multiple of n */
#define ROUNDUP(a, n)                                                          \
({                                                                             \
	typeof(a) __b;                                                         \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __n = (uint64_t) (n);                                 \
		__b = (typeof(a)) (ROUNDDOWN((uint64_t) (a) + __n - 1, __n));  \
	} else {                                                               \
		uintptr_t __n = (uintptr_t) (n);                               \
		__b = (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n)); \
	}                                                                      \
	__b;                                                                   \
})
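/* Worked examples (illustrative), with n = 0x1000 (e.g. a page):
 *	ROUNDDOWN(0x1234, 0x1000) == 0x1000
 *	ROUNDUP(0x1234, 0x1000) == 0x2000
 *	ROUNDUP(0x2000, 0x1000) == 0x2000 (already a multiple of n)
 */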

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
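/* e.g. DIV_ROUND_UP(10, 4) == 3.  Note that (n) + (d) - 1 can overflow if n
 * is near the maximum of its type. */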
104
105 // Return the integer logarithm of the value provided rounded down
106 static inline uintptr_t LOG2_DOWN(uintptr_t value)
107 {
108         value |= 1;  // clz(0) is undefined, just or in a 1 bit and define it
109         // intrinsic __builtin_clz supported by both > gcc4.6 and LLVM > 1.5
110         return (sizeof(value) == 8) ? 63 - __builtin_clzll(value)
111                                     : 31 - __builtin_clz(value);
112 }

// Return the integer logarithm of the value provided, rounded up
static inline uintptr_t LOG2_UP(uintptr_t value)
{
	uintptr_t ret = LOG2_DOWN(value);
	ret += 0 != (value ^ ((uintptr_t) 1 << ret));  // +1 if any lower bit is set
	return ret;
}
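/* Worked examples (illustrative):
 *	LOG2_DOWN(8) == 3, LOG2_DOWN(9) == 3
 *	LOG2_UP(8) == 3, LOG2_UP(9) == 4
 * LOG2_DOWN(0) is 0, due to the 'value |= 1' above. */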

static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
	/* Shift a uintptr_t, not a plain int 1: an int shift is undefined for
	 * results of 2^31 and above. */
	return (uintptr_t) 1 << LOG2_UP(value);
}

static inline uintptr_t ROUNDDOWNPWR2(uintptr_t value)
{
	return (uintptr_t) 1 << LOG2_DOWN(value);
}
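/* e.g. ROUNDUPPWR2(9) == 16, ROUNDUPPWR2(8) == 8, ROUNDDOWNPWR2(9) == 8. */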

/* We wrap around if UINT64_MAX < a * b, which is also UINT64_MAX / a < b. */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
	if (!a)
		return FALSE;
	return (uint64_t)(-1) / a < b;
}
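/* Illustrative caller (nr_items and item_sz are hypothetical): check before
 * computing an allocation size:
 *	if (mult_will_overflow_u64(nr_items, item_sz))
 *		return -1;
 *	size = nr_items * item_sz;
 */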

// Return the offset of 'member' relative to the beginning of a struct type
#ifndef offsetof
#define offsetof(type, member)  ((size_t) (&((type*)0)->member))
#endif

/* Return the container/struct holding the object 'ptr' points to */
#define container_of(ptr, type, member) ({                                     \
	(type*)((char*)(ptr) - offsetof(type, member));                        \
})
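/* Illustrative use (struct foo and struct bar are hypothetical): given
 *	struct foo {
 *		int x;
 *		struct bar memb;
 *	};
 * and a 'struct bar *bp' that points at some foo's 'memb' field,
 *	struct foo *fp = container_of(bp, struct foo, memb);
 * recovers the enclosing struct foo. */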

/* Force x to be read exactly once.  You may still need mbs().  See
 * http://lwn.net/Articles/508991/ for more info. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
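/* Illustrative use ('stop' is a hypothetical flag another thread sets):
 * force a fresh read every iteration, instead of letting the compiler cache
 * the value in a register:
 *	while (!ACCESS_ONCE(stop))
 *		cpu_relax();
 */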

/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin til the func is complete. */
#define run_once(func)                                                         \
{                                                                              \
	static bool ran_once = FALSE;                                          \
	static atomic_t is_running = FALSE;                                    \
	if (!ran_once) {                                                       \
		if (!atomic_swap(&is_running, TRUE)) {                         \
			/* we won the race and get to run the func */          \
			func;                                                  \
			wmb();	/* don't let the ran_once write pass previous writes */ \
			ran_once = TRUE;                                       \
		} else {                                                       \
			/* someone else won, wait til they are done to break out */ \
			while (!ran_once)                                      \
				cpu_relax();                                   \
		}                                                              \
	}                                                                      \
}
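/* Illustrative use (foo_init is hypothetical): all callers, on any core,
 * return only after foo_init() has completed exactly once:
 *	run_once(foo_init());
 */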

/* Unprotected, single-threaded version, makes sure func is run exactly once */
#define run_once_racy(func)                                                    \
{                                                                              \
	static bool ran_once = FALSE;                                          \
	if (!ran_once) {                                                       \
		func;                                                          \
		ran_once = TRUE;                                               \
	}                                                                      \
}

/* Aborts with 'retcmd' if this function has already been called.  Compared to
 * run_once, this is put at the top of a function that can be called from
 * multiple sources but should only execute once. */
#define init_once_racy(retcmd)                                                 \
{                                                                              \
	static bool initialized = FALSE;                                       \
	if (initialized) {                                                     \
		retcmd;                                                        \
	}                                                                      \
	initialized = TRUE;                                                    \
}
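/* Illustrative use (foo_init is hypothetical): callable from several code
 * paths, but not concurrently; only the first call past this line does the
 * work:
 *	void foo_init(void)
 *	{
 *		init_once_racy(return);
 *		// ... actual initialization, runs once ...
 *	}
 */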

#endif /* __ASSEMBLER__ */

#endif /* ROS_COMMON_H */