3 #include <ros/common.h>
6 /* Force a rebuild of the whole kernel if 64BIT-ness changed */
/* Maximum value representable in a size_t.  Guarded so we don't clash with a
 * <stdint.h>-provided definition if one is visible in this build. */
#ifndef SIZE_MAX
#define SIZE_MAX (~(size_t)0)
#endif
12 // Efficient min and max operations
15 typeof(_a) __a = (_a); \
16 typeof(_b) __b = (_b); \
17 __a <= __b ? __a : __b; \
21 typeof(_a) __a = (_a); \
22 typeof(_b) __b = (_b); \
23 __a >= __b ? __a : __b; \
/* True iff p is aligned to a, where a is a power of two (e.g. 2^6). */
#define ALIGNED(p, a) ((((uintptr_t)(p)) & ((a) - 1)) == 0)
28 /* Aligns x up to the mask, e.g. (2^6 - 1) (round up if any mask bits are set)*/
29 #define __ALIGN_MASK(x, mask) (((uintptr_t)(x) + (mask)) & ~(mask))
30 /* Aligns x up to the alignment, e.g. 2^6. */
31 #define ALIGN(x, a) ((typeof(x)) __ALIGN_MASK(x, (a) - 1))
/* Round x down to the given mask, e.g. (2^6 - 1): any bits of x covered by
 * the mask are cleared. */
#define __ALIGN_MASK_DOWN(x, mask) (((uintptr_t)(x)) & ~(mask))
/* Round x down to the given power-of-two alignment, e.g. 2^6. */
#define ALIGN_DOWN(x, a) ((typeof(x)) __ALIGN_MASK_DOWN((x), (a) - 1))
/* True iff x is a nonzero power of two.  Returns false for 0 (debatable,
 * based on what you want).  Note: evaluates x more than once.
 *
 * Fix: the inner subtraction must be ((x) - 1), not (x - 1).  With the old
 * form, an argument like (a ^ b) expanded to (a^b) & (a ^ b - 1), which by
 * precedence parses as a ^ (b - 1) and gives wrong answers. */
#define IS_PWR2(x) ((x) && !((x) & ((x) - 1)))
/* Element count of a statically-declared array; alias for COUNT_OF, which is
 * defined elsewhere in this header (presumably sizeof-based — only valid on
 * true arrays, not on pointers a parameter has decayed to). */
#define ARRAY_SIZE(x) COUNT_OF(x)
42 /* Makes sure func is run exactly once. Can handle concurrent callers, and
43 * other callers spin til the func is complete. */
44 #define run_once(func) \
46 static bool ran_once = FALSE; \
47 static bool is_running = FALSE; \
49 /* fetch and set TRUE, without a header or test_and_set weirdness */ \
50 if (!__sync_fetch_and_or(&is_running, TRUE)) { \
51 /* we won the race and get to run the func */ \
53 wmb(); /* don't let the ran_once write pass previous writes */ \
56 /* someone else won, wait til they are done to break out */ \
63 /* Unprotected, single-threaded version, makes sure func is run exactly once */
64 #define run_once_racy(func) \
66 static bool ran_once = FALSE; \
75 static inline uint32_t low32(uint64_t val)
77 return val & 0xffffffff;
80 static inline uint32_t high32(uint64_t val)
85 #endif /* !__ASSEMBLER__ */