kmalloc_align()
[akaros.git] / kern / include / ros / common.h
1 #ifndef ROS_COMMON_H
2 #define ROS_COMMON_H
3
4 #ifndef __ASSEMBLER__
5 #ifndef __IVY__
6 #include <ros/noivy.h>
7 #endif
8
9 #include <stddef.h>
10 #include <stdint.h>
11 #include <sys/types.h>
12 #include <stdbool.h>
13
/* Kernel-wide scalar typedefs. */
typedef uintptr_t physaddr_t;		/* physical memory address */
typedef long intreg_t;			/* signed, machine-register-sized */
typedef unsigned long uintreg_t;	/* unsigned, machine-register-sized */

#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE	1
#endif

#ifndef FALSE
#define FALSE	0
#endif

/* Power-of-two size constants.  The 64-bit ones carry 'ull' suffixes so they
 * keep their value on 32-bit builds where plain int/unsigned is 32 bits. */
#define KiB	1024u
#define MiB	1048576u
#define GiB	1073741824u
#define TiB	1099511627776ull
#define PiB	1125899906842624ull
#define EiB	1152921504606846976ull
36
/* True if 'p' (pointer or integer) is aligned to 'a' bytes.  Assumes 'a' is a
 * power of two: checks that the low log2(a) bits are clear. */
#define ALIGNED(p, a)	(!(((uintptr_t)(p)) & ((a)-1)))

/* Element count of a true array.  NOT valid on pointers or array-typed
 * function parameters (those decay to pointers). */
#define ARRAY_SIZE(x) (sizeof((x))/sizeof((x)[0]))

/* Test bit 'bit' of 'flags'.  NOTE(review): '1 << (bit)' is a plain int
 * shift, so bit >= 31 is undefined behavior — confirm callers only pass
 * small bit indices, or widen the 1 to the flags' type. */
#define CHECK_FLAG(flags,bit)	((flags) & (1 << (bit)))

/* Walk 'var' over every slot of a 'size'-slot circular buffer, starting at
 * 'next' and wrapping; the hidden counter _var bounds the walk to 'size'
 * iterations. */
#define FOR_CIRC_BUFFER(next, size, var) \
	for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
45
// Efficient min and max operations
#ifdef ROS_KERNEL /* Glibc has their own */
/* Statement-expression MIN/MAX: each argument is evaluated exactly once, so
 * MIN(i++, j) has no double-evaluation surprise, and typeof keeps the
 * operands' own types. */
#define MIN(_a, _b)                                             \
({                                                              \
	typeof(_a) __a = (_a);                                  \
	typeof(_b) __b = (_b);                                  \
	__a <= __b ? __a : __b;                                 \
})
#define MAX(_a, _b)                                             \
({                                                              \
	typeof(_a) __a = (_a);                                  \
	typeof(_b) __b = (_b);                                  \
	__a >= __b ? __a : __b;                                 \
})
#endif
61
/* Rounding operations (efficient when n is a power of 2)
 * Round down to the nearest multiple of n.  'n' need not be a power of two;
 * the '%' form is correct for any nonzero n.
 * The compiler should compile out the branch.  This is needed for 32 bit, so
 * that we can round down uint64_t, without chopping off the top 32 bits. */
#define ROUNDDOWN(a, n)                                                        \
({                                                                             \
	typeof(a) __b;                                                         \
	/* 64-bit operand: do the arithmetic in uint64_t even on ILP32 */      \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __a = (uint64_t) (a);                                 \
		__b = (typeof(a)) (__a - __a % (n));                           \
	} else {                                                               \
		uintptr_t __a = (uintptr_t) (a);                               \
		__b = (typeof(a)) (__a - __a % (n));                           \
	}                                                                      \
	__b;                                                                   \
})

/* Round up to the nearest multiple of n */
#define ROUNDUP(a, n)                                                          \
({                                                                             \
	typeof(a) __b;                                                         \
	/* classic add-(n-1)-then-round-down; width split mirrors ROUNDDOWN */ \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __n = (uint64_t) (n);                                 \
		__b = (typeof(a)) (ROUNDDOWN((uint64_t) (a) + __n - 1, __n));  \
	} else {                                                               \
		uintptr_t __n = (uintptr_t) (n);                               \
		__b = (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n)); \
	}                                                                      \
	__b;                                                                   \
})
92
/* Return the integer logarithm (base 2) of 'value', rounded down.
 * LOG2_DOWN(0) and LOG2_DOWN(1) both return 0. */
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
	uintptr_t log = 0;

	while (value > 1) {
		value >>= 1;
		log++;
	}
	return log;
}
100
/* Return the integer logarithm of the value provided, rounded up.
 * Note: LOG2_UP(0) returns 1 (0 ^ (1 << 0) is nonzero) — historical
 * behavior, preserved here. */
static inline uintptr_t LOG2_UP(uintptr_t value)
{
	uintptr_t _v = LOG2_DOWN(value);

	/* Shift a uintptr_t, not a plain int: on 64-bit targets _v can reach
	 * 63, and '1 << _v' with _v >= 31 is undefined behavior / drops the
	 * high bits, making LOG2_UP wrong for values >= 2^31. */
	if (value ^ ((uintptr_t)1 << _v))
		return _v + 1;
	else
		return _v;
}
110
/* Round 'value' up to the nearest power of 2.  The 1 is cast to uintptr_t so
 * the shift is done at pointer width — a plain int '1 << 31' (or more) is
 * undefined behavior and would lose the result's high bits on 64-bit. */
static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
	return (uintptr_t)1 << LOG2_UP(value);
}
115
/* Round 'value' down to the nearest power of 2.  As in ROUNDUPPWR2, the 1
 * must be uintptr_t-wide: an int shift by >= 31 is undefined behavior. */
static inline uintptr_t ROUNDDOWNPWR2(uintptr_t value)
{
	return (uintptr_t)1 << LOG2_DOWN(value);
}
120
/* Predicate: would a * b wrap past UINT64_MAX?  Uses the identity
 * UINT64_MAX < a * b  <=>  UINT64_MAX / a < b  (for nonzero a). */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
	if (a == 0)
		return false;
	return b > UINT64_MAX / a;
}
128
// Return the offset of 'member' relative to the beginning of a struct type.
// Classic null-pointer idiom; only defined when the toolchain's <stddef.h>
// hasn't already provided offsetof (hence the guard).
#ifndef offsetof
#define offsetof(type, member)  ((size_t) (&((type*)0)->member))
#endif
133
134 /* Return the container/struct holding the object 'ptr' points to */
135 #define container_of(ptr, type, member) ({                                     \
136         (type*)((char*)ptr - offsetof(type, member));                             \
137 })
138
/* Force the reading exactly once of x.  You may still need mbs().  See
 * http://lwn.net/Articles/508991/ for more info.  The volatile-qualified
 * access keeps the compiler from caching, merging, or re-issuing the load;
 * it provides no hardware ordering by itself. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
142
// Ivy currently can only handle 63 bits (OCaml thing), so use this to make
// a uint64_t programmatically from 32-bit 'upper' and 'lower' halves.
// 'lower' is truncated to 32 bits before widening: otherwise a signed
// negative argument (e.g. UINT64(0, -1)) would sign-extend and clobber the
// upper half.
#define UINT64(upper, lower) ( (((uint64_t)(upper)) << 32) | (uint64_t)(uint32_t)(lower) )
146
/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin til the func is complete.  Wrapped in do/while(0) so the
 * macro expands to a single statement — the bare '{ ... }' form breaks when
 * used as 'if (x) run_once(f); else ...' (the trailing ';' becomes an empty
 * statement and detaches the else). */
#define run_once(func)                                                         \
do {                                                                           \
	static bool ran_once = FALSE;                                          \
	static atomic_t is_running = FALSE;                                    \
	if (!ran_once) {                                                       \
		if (!atomic_swap(&is_running, TRUE)) {                         \
			/* we won the race and get to run the func */          \
			func;                                                  \
			/* don't let the ran_once write pass previous writes */\
			wmb();                                                 \
			ran_once = TRUE;                                       \
		} else {                                                       \
			/* someone else won; spin til they finish */           \
			while (!ran_once)                                      \
				cpu_relax();                                   \
		}                                                              \
	}                                                                      \
} while (0)
167
/* Unprotected, single-threaded version, makes sure func is run exactly once.
 * do/while(0) makes the macro a single statement, safe in unbraced
 * if/else bodies. */
#define run_once_racy(func)                                                    \
do {                                                                           \
	static bool ran_once = FALSE;                                          \
	if (!ran_once) {                                                       \
		func;                                                          \
		ran_once = TRUE;                                               \
	}                                                                      \
} while (0)
177
/* Aborts with 'retcmd' if this function has already been called.  Compared to
 * run_once, this is put at the top of a function that can be called from
 * multiple sources but should only execute once.  do/while(0) makes the macro
 * a single statement; a 'return' inside retcmd still exits the enclosing
 * function as intended. */
#define init_once_racy(retcmd)                                                 \
do {                                                                           \
	static bool initialized = FALSE;                                       \
	if (initialized) {                                                     \
		retcmd;                                                        \
	}                                                                      \
	initialized = TRUE;                                                    \
} while (0)
189
190 #endif /* __ASSEMBLER__ */
191
192 #endif /* ROS_COMMON_H */