#pragma once

#ifndef __ASSEMBLER__

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdbool.h>
#include <string.h>

typedef uintptr_t physaddr_t;
typedef long intreg_t;
typedef unsigned long uintreg_t;

#ifndef NULL
#define NULL ((void*) 0)
#endif

#ifndef TRUE
#define TRUE	1
#endif

#ifndef FALSE
#define FALSE	0
#endif

/* Loop over all 'size' slots of a circular buffer, with index 'var' starting
 * at slot 'next' and wrapping around. */
#define FOR_CIRC_BUFFER(next, size, var) \
	for (int _var = 0, var = (next); _var < (size); _var++, var = (var + 1) % (size))
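
/* Usage sketch (illustrative; 'ring', 'head', and handle_slot() are
 * hypothetical):
 *
 *	FOR_CIRC_BUFFER(head, COUNT_OF(ring), i)
 *		handle_slot(&ring[i]);
 */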

/* Expand the argument, then turn it into a string literal */
#define STRINGIFY(s) __STRINGIFY(s)
#define __STRINGIFY(s) #s

/* A macro for testing if another macro has been #defined or not.  Can be used
 * wherever you need a boolean defined.  Returns 0 or 1. */
#define is_defined(macro) is_defined_(macro)
#define is_defined_test_1 ,
#define is_defined_(value) is_defined__(is_defined_test_##value, value)
#define is_defined__(comma, value) is_defined___(comma 1, 0)
#define is_defined___(_, v, ...) v
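
/* For example (illustrative): assuming CONFIG_FOO is either undefined or
 * defined to 1, Kconfig-style, both branches still get compiled, unlike with
 * an #ifdef:
 *
 *	if (is_defined(CONFIG_FOO))
 *		foo_init();
 */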

/* Number of elements in a statically-sized array */
#define COUNT_OF(x) (sizeof((x))/sizeof((x)[0]))

/* Zero an object in place */
#define ZERO_DATA(s) memset(&(s), 0, sizeof(s))

/* Rounding operations (efficient when n is a power of 2).
 * Round down to the nearest multiple of n.
 * The compiler should compile out the branch.  This is needed for 32 bit, so
 * that we can round down a uint64_t without chopping off the top 32 bits. */
#define ROUNDDOWN(a, n)                                                        \
({                                                                             \
	typeof(a) __b;                                                         \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __a = (uint64_t) (a);                                 \
		__b = (typeof(a)) (__a - __a % (n));                           \
	} else {                                                               \
		uintptr_t __a = (uintptr_t) (a);                               \
		__b = (typeof(a)) (__a - __a % (n));                           \
	}                                                                      \
	__b;                                                                   \
})

/* Round up to the nearest multiple of n */
#define ROUNDUP(a, n)                                                          \
({                                                                             \
	typeof(a) __b;                                                         \
	if (sizeof(a) == 8) {                                                  \
		uint64_t __n = (uint64_t) (n);                                 \
		__b = (typeof(a)) (ROUNDDOWN((uint64_t) (a) + __n - 1, __n));  \
	} else {                                                               \
		uintptr_t __n = (uintptr_t) (n);                               \
		__b = (typeof(a)) (ROUNDDOWN((uintptr_t) (a) + __n - 1, __n)); \
	}                                                                      \
	__b;                                                                   \
})

/* Divide n by d, rounding up */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
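
/* Example (illustrative): the page-aligned span covering [addr, addr + len),
 * assuming PGSIZE is the page size:
 *
 *	uintptr_t base = ROUNDDOWN(addr, PGSIZE);
 *	uintptr_t top = ROUNDUP(addr + len, PGSIZE);
 *	size_t nr_pages = DIV_ROUND_UP(top - base, PGSIZE);
 */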

// Return the integer logarithm of the value provided, rounded down
static inline uintptr_t LOG2_DOWN(uintptr_t value)
{
	value |= 1;  // clz(0) is undefined, just or in a 1 bit and define it
	// intrinsic __builtin_clz supported by both gcc > 4.6 and LLVM > 1.5
	return (sizeof(value) == 8) ? 63 - __builtin_clzll(value)
	                            : 31 - __builtin_clz(value);
}

// Return the integer logarithm of the value provided, rounded up
static inline uintptr_t LOG2_UP(uintptr_t value)
{
	uintptr_t ret = LOG2_DOWN(value);
	ret += 0 != (value ^ ((uintptr_t) 1 << ret));  // Add 1 if a lower bit is set
	return ret;
}

static inline uintptr_t ROUNDUPPWR2(uintptr_t value)
{
	/* Shift a uintptr_t-width 1, not a plain int, so shifts >= 31 are OK */
	return (uintptr_t) 1 << LOG2_UP(value);
}

static inline uintptr_t ROUNDDOWNPWR2(uintptr_t value)
{
	return (uintptr_t) 1 << LOG2_DOWN(value);
}
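
/* For example: LOG2_DOWN(10) == 3 and LOG2_UP(10) == 4, so
 * ROUNDDOWNPWR2(10) == 8 and ROUNDUPPWR2(10) == 16.  Exact powers of 2 map to
 * themselves: ROUNDUPPWR2(8) == ROUNDDOWNPWR2(8) == 8. */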

/* We wrap around if UINT64_MAX < a * b, which is equivalent to
 * UINT64_MAX / a < b (for a != 0). */
static inline bool mult_will_overflow_u64(uint64_t a, uint64_t b)
{
	if (!a)
		return FALSE;
	return (uint64_t)(-1) / a < b;
}
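
/* e.g. rejecting an array allocation that would wrap (illustrative; nmemb,
 * size, and alloc_bytes() are hypothetical):
 *
 *	if (mult_will_overflow_u64(nmemb, size))
 *		return NULL;
 *	return alloc_bytes(nmemb * size);
 */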

// Return the offset of 'member' relative to the beginning of a struct type
#ifndef offsetof
#define offsetof(type, member)  ((size_t) (&((type*)0)->member))
#endif

/* Return the container/struct holding the object 'ptr' points to */
#define container_of(ptr, type, member) ({                                     \
	(type*)((char*)(ptr) - offsetof(type, member));                        \
})
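
/* Usage sketch (the struct and variable names are hypothetical):
 *
 *	struct foo {
 *		int x;
 *		struct bar inner;
 *	};
 *
 * Given a 'struct bar *b' that points at the 'inner' of some struct foo:
 *
 *	struct foo *f = container_of(b, struct foo, inner);
 */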

/* Force the reading/writing exactly once of x.  You may still need memory
 * barriers.  See http://lwn.net/Articles/508991/ for more info. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#define READ_ONCE(x) ACCESS_ONCE(x)
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *)&(x)) = (val))
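
/* e.g. polling a flag written by another core (illustrative; 'job' is
 * hypothetical):
 *
 *	while (!READ_ONCE(job->done))
 *		cpu_relax();
 */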

/* Makes sure func is run exactly once.  Can handle concurrent callers, and
 * other callers spin til the func is complete. */
#define run_once(func)                                                         \
do {                                                                           \
	static bool ran_once = FALSE;                                          \
	static bool is_running = FALSE;                                        \
	if (!ran_once) {                                                       \
		/* fetch and set TRUE, without a header or test_and_set weirdness */ \
		if (!__sync_fetch_and_or(&is_running, TRUE)) {                 \
			/* we won the race and get to run the func */          \
			func;                                                  \
			wmb();  /* don't let the ran_once write pass previous writes */ \
			ran_once = TRUE;                                       \
		} else {                                                       \
			/* someone else won, wait til they are done to break out */ \
			while (!ran_once)                                      \
				cpu_relax();                                   \
		}                                                              \
	}                                                                      \
} while (0)
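
/* e.g. lazy, concurrency-safe initialization (illustrative; foo_init() is
 * hypothetical):
 *
 *	void do_foo_op(void)
 *	{
 *		run_once(foo_init());
 *		...
 *	}
 */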

/* Unprotected, single-threaded version; makes sure func is run exactly once */
#define run_once_racy(func)                                                    \
do {                                                                           \
	static bool ran_once = FALSE;                                          \
	if (!ran_once) {                                                       \
		func;                                                          \
		ran_once = TRUE;                                               \
	}                                                                      \
} while (0)

#endif /* __ASSEMBLER__ */