Merge origin/netpush (networking code) (XCC)
[akaros.git] / kern / include / ros / common.h
1 #ifndef ROS_COMMON_H
2 #define ROS_COMMON_H
3
4 #ifndef __IVY__
5 #include <ros/noivy.h>
6 #endif
7
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <sys/types.h>
11 #include <stdbool.h>
12
/* Physical address, stored as an integer the width of a pointer. */
typedef uintptr_t physaddr_t;
/* Signed / unsigned integer types sized to a native machine register. */
typedef ssize_t intreg_t;
typedef size_t uintreg_t;
16
/* Fallback definitions, used only if libc/compiler headers did not already
 * provide them. */
#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE	1
#endif

#ifndef FALSE
#define FALSE	0
#endif
28
/* Nonzero iff bit 'bit' of 'flags' is set.  Shift an unsigned long, not a
 * plain int: '1 << 31' shifts into the sign bit of an int, which is
 * undefined behavior. */
#define CHECK_FLAG(flags,bit)   ((flags) & (1UL << (bit)))
30
/* Iterate 'var' over 'size' slots of a circular buffer, starting at index
 * 'next' and wrapping modulo 'size'.  Both the hidden trip counter '_var'
 * and the user-visible index 'var' are declared in the for-init, so 'var'
 * only exists inside the loop body.
 * NOTE(review): 'size' is evaluated every iteration and must be nonzero
 * (the % would divide by zero). */
#define FOR_CIRC_BUFFER(next, size, var) \
	for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
33
// Efficient min and max operations
#ifdef ROS_KERNEL /* Glibc has their own */
/* Statement-expression form: each argument is evaluated exactly once, so
 * MIN(i++, j) behaves sanely, unlike the naive ternary macro. */
#define MIN(_a, _b)						\
({								\
	typeof(_a) __a = (_a);					\
	typeof(_b) __b = (_b);					\
	__a <= __b ? __a : __b;					\
})
#define MAX(_a, _b)						\
({								\
	typeof(_a) __a = (_a);					\
	typeof(_b) __b = (_b);					\
	__a >= __b ? __a : __b;					\
})
#endif
49
/* Default alignment (bytes) used by MEM_ALIGN_SIZE below. */
#define ROS_MEM_ALIGN 4
// Rounding operations (efficient when n is a power of 2)
// Round down to the nearest multiple of n
/* Works for any nonzero n (uses %), not just powers of two.  The result is
 * cast back to the type of 'a'. */
#define ROUNDDOWN(a, n)						\
({								\
	uintptr_t __a = (uintptr_t) (a);				\
	(typeof(a)) (__a - __a % (n));				\
})
// Round up to the nearest multiple of n
/* NOTE(review): the '+ __n - 1' can wrap if 'a' is within n of the top of
 * the address space. */
#define ROUNDUP(a, n)						\
({								\
	uintptr_t __n = (uintptr_t) (n);				\
	(typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n));	\
})

/* Round 'size' up to the kernel's default memory alignment. */
#define MEM_ALIGN_SIZE(size) ROUNDUP(size, ROS_MEM_ALIGN)
66
// Round down to the nearest multiple of n
/* Pointer variant: does the arithmetic on a char *, only converting the
 * offset (not the pointer itself) through uintptr_t for the % operation. */
#define PTRROUNDDOWN(a, n)						\
({								\
	char * __a = (char *) (a);				\
	(typeof(a)) (__a - (uintptr_t)__a % (n));				\
})
// Round pointer up to the nearest multiple of n
#define PTRROUNDUP(a, n)						\
({								\
	uintptr_t __n = (uintptr_t) (n);				\
	(typeof(a)) (PTRROUNDDOWN((char *) (a) + __n - 1, __n));	\
})
79
/* Return floor(log2(value)): the index of the highest set bit.
 * Returns 0 for value == 0 or 1. */
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
	uintptr_t log;

	for (log = 0; (value >> log) > 1; log++)
		;
	return log;
}
87
// Return the integer logarithm of the value provided rounded up
/* Returns ceil(log2(value)); LOG2_UP(1) == 0.
 * Fix: the old test 'value ^ (1 << _v)' shifted an int-typed 1, which is
 * undefined behavior once the floor-log reaches 31 and gave wrong answers
 * for 64-bit values (e.g. LOG2_UP(1 << 33) came out as 34).  Shift a
 * uintptr_t instead; the floor-log loop is inlined here. */
static inline uintptr_t LOG2_UP(uintptr_t value)
{
	uintptr_t down = 0;

	/* floor(log2(value)), same computation as LOG2_DOWN() */
	while ((value >> down) > 1)
		down++;
	/* Bump by one unless value is an exact power of two. */
	if (value ^ ((uintptr_t)1 << down))
		return down + 1;
	else
		return down;
}
97
/* Round 'value' up to the nearest power of two.
 * Fix: shift a uintptr_t, not an int-typed 1 — '1 << LOG2_UP(value)' is
 * undefined behavior for shifts >= 31 and truncated the result for large
 * 64-bit inputs. */
static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
	return (uintptr_t)1 << LOG2_UP(value);
}
102
/* Return whether a * b would wrap around a uint64_t.  Overflow happens
 * exactly when UINT64_MAX < a * b, i.e. UINT64_MAX / a < b for nonzero a. */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
	if (a == 0)
		return false;	/* 0 * b never overflows (and avoids div by 0) */
	return b > UINT64_MAX / a;
}
110
// Return the offset of 'member' relative to the beginning of a struct type
/* Classic null-pointer idiom; only used when libc/the compiler did not
 * already define offsetof.
 * NOTE(review): technically undefined behavior per the C standard; compilers
 * accept it, but __builtin_offsetof would be cleaner where available. */
#ifndef offsetof
#define offsetof(type, member)	((size_t) (&((type*)0)->member))
#endif
115
/* Return the container/struct holding the object 'ptr' points to, where
 * 'ptr' points at the struct's 'member' field.
 * Fix: parenthesize 'ptr' in the expansion — previously '(char*)ptr' bound
 * the cast to only the first token of a compound argument expression. */
#define container_of(ptr, type, member) ({                                     \
	(type*)((char*)(ptr) - offsetof(type, member));                        \
})
120
// Ivy currently can only handle 63 bits (OCaml thing), so use this to make
// a uint64_t programatically
/* Fix: force 'lower' through uint32_t before widening — a signed/negative
 * 'lower' used to sign-extend and clobber the upper 32 bits. */
#define UINT64(upper, lower) ( (((uint64_t)(upper)) << 32) | (uint64_t)(uint32_t)(lower) )
124
/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin til the func is complete.
 * Wrapped in do { } while (0) so 'run_once(foo());' acts as one statement
 * and composes with if/else — a bare { } block plus the caller's trailing
 * semicolon mis-parses there. */
#define run_once(func)                                                         \
do {                                                                           \
	static bool ran_once = FALSE;                                          \
	static atomic_t is_running = FALSE;                                    \
	if (!ran_once) {                                                       \
		if (!atomic_swap(&is_running, TRUE)) {                         \
			/* we won the race and get to run the func */          \
			func;                                                  \
			wmb();	/* don't let the ran_once write pass previous writes */ \
			ran_once = TRUE;                                       \
		} else {                                                       \
			/* someone else won, wait til they are done to break out */ \
			while (!ran_once)                                      \
				cpu_relax();                                   \
		}                                                              \
	}                                                                      \
} while (0)
145
/* Unprotected, single-threaded version, makes sure func is run exactly once.
 * Wrapped in do { } while (0) so it acts as a single statement (safe in
 * if/else); uses stdbool's false/true, which this header already includes. */
#define run_once_racy(func)                                                    \
do {                                                                           \
	static bool ran_once = false;                                          \
	if (!ran_once) {                                                       \
		func;                                                          \
		ran_once = true;                                               \
	}                                                                      \
} while (0)
155
/* Aborts with 'retcmd' if this function has already been called.  Compared to
 * run_once, this is put at the top of a function that can be called from
 * multiple sources but should only execute once.
 * Wrapped in do { } while (0) so it acts as a single statement.  'retcmd' is
 * expected to be a return (or goto) per the contract above, which still
 * exits the enclosing function from inside the do/while.
 * NOTE(review): a bare break/continue retcmd would now bind to this
 * do/while — confirm no caller does that. */
#define init_once_racy(retcmd)                                                 \
do {                                                                           \
	static bool initialized = false;                                       \
	if (initialized) {                                                     \
		retcmd;                                                        \
	}                                                                      \
	initialized = true;                                                    \
} while (0)
167
168 #endif /* ROS_COMMON_H */