/* Copyright (c) 2013 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Simple ring-buffer tracing for in-kernel events.  The rings have a
 * power-of-two number of slots, and each entry size will be rounded up to the
 * nearest power of two.  Ring slot acquisition is thread-safe by default, but
 * we provide racy helpers if you want a little less overhead.
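 *
 * For example (illustrative numbers): a 24-byte event struct gets 32-byte
 * slots (tr_event_sz_shift = 5), so a 4096-byte buffer yields 128 slots.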
 *
 * Users need to provide a contiguous memory buffer and the size of an event
 * struct to init.  For example:
 *
 * 	trace_ring_init(my_trace_ring_ptr, my_buf, buf_sz, event_sz);
 *
 * And then to store a trace, first get a slot, then fill it in:
 *
 * 	struct my_trace_event *my_trace = get_trace_slot(my_trace_ring_ptr);
 *
 * 	if (my_trace)	// only need to check if we aren't overwriting
 * 		my_trace->field = whatever;
 *
 * Later, to process the traces, provide a function pointer to
 * trace_ring_foreach().  This performs the func on all traces in the ring,
 * including unused slots:
 *
 * 	void trace_handler(void *trace_event, void *data)
 * 	{
 * 		// handle one slot; data is the blob passed to foreach
 * 	}
 *
 * 	trace_ring_foreach(my_trace_ring_ptr, trace_handler, optional_blob);
 *
 * Rings can be racy or not, and can overwrite entries or not.  If you are not
 * overwriting, the ring will stop giving you slots.  You need to reset the
 * ring to get fresh slots again.  If you are overwriting, you don't need to
 * check the return value of get_trace_slot_overwrite().
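 *
 * For example, a non-overwriting user could drain and recycle the ring once
 * it fills up (a sketch, using the example handler above):
 *
 * 	if (!get_trace_slot(my_trace_ring_ptr)) {
 * 		trace_ring_foreach(my_trace_ring_ptr, trace_handler, 0);
 * 		trace_ring_reset(my_trace_ring_ptr);
 * 	}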
 *
 * Given there is overwrite, tr_next doesn't really tell us which ones were
 * used.  So your handler should check for a flag or something.  Timestamps
 * might help make sense of the data in these cases too. */
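
/* One possible event layout for the flag/timestamp advice above; the struct
 * and field names are illustrative, not part of this API:
 *
 * 	struct my_trace_event {
 * 		uint64_t	timestamp;	// set when the trace is taken
 * 		bool		used;		// set by tracer, checked by handler
 * 	};
 */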

#pragma once

#include <ros/common.h>

struct trace_ring {
	unsigned char		*tr_buf;
	size_t			tr_buf_sz;
	unsigned int		tr_event_sz_shift;
	unsigned int		tr_max;
	unsigned long		tr_next;
};

typedef void (*trace_handler_t)(void *event, void *blob);

static inline void *get_trace_slot(struct trace_ring *tr);
static inline void *get_trace_slot_overwrite(struct trace_ring *tr);
static inline void *get_trace_slot_racy(struct trace_ring *tr);
static inline void *get_trace_slot_overwrite_racy(struct trace_ring *tr);

void trace_ring_init(struct trace_ring *tr, void *buf, size_t buf_size,
                     size_t event_size);
void trace_ring_reset(struct trace_ring *tr);
void trace_ring_reset_and_clear(struct trace_ring *tr);
void trace_ring_foreach(struct trace_ring *tr, trace_handler_t tr_func,
                        void *data);

/* Inlined funcs, declared above */

/* Get next trace ring slot with no wrapping */
static inline void *__get_tr_slot(struct trace_ring *tr, unsigned long ind)
{
	/* ind is unsigned, so only the upper bound needs checking */
	dassert(ind < tr->tr_max);
	/* event sizes are rounded up to the nearest power of 2 (sz_shift) */
	return (void *) (tr->tr_buf + (ind << tr->tr_event_sz_shift));
}
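
/* Example of the math above (illustrative numbers): with tr_event_sz_shift = 5
 * (32-byte slots), ind = 3 resolves to tr_buf + 96. */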

/* Get next trace ring slot with wrapping */
static inline void *
__get_tr_slot_overwrite(struct trace_ring *tr, unsigned long slot)
{
	/* tr_max is a power of 2, we're ignoring the upper bits of slot */
	slot &= tr->tr_max - 1;
	return __get_tr_slot(tr, slot);
}
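
/* Example of the wrap above (illustrative numbers): with tr_max = 8, slot
 * counter 13 masks to 13 & 7 = 5, so the 14th trace overwrites the 6th slot. */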

static inline void *get_trace_slot(struct trace_ring *tr)
{
	/* Using syncs, instead of atomics, since we access tr_next as both an
	 * atomic and a regular variable. */
	unsigned long my_slot = __sync_fetch_and_add(&tr->tr_next, 1);

	/* We can briefly go over, so long as we subtract back down to where we
	 * were before.  This will work so long as we don't have ~2^64
	 * threads... */
	if (my_slot >= tr->tr_max) {
		__sync_fetch_and_add(&tr->tr_next, -1);
		return 0;
	}
	return __get_tr_slot(tr, my_slot);
}

static inline void *get_trace_slot_overwrite(struct trace_ring *tr)
{
	return __get_tr_slot_overwrite(tr,
				       __sync_fetch_and_add(&tr->tr_next, 1));
}

static inline void *get_trace_slot_racy(struct trace_ring *tr)
{
	unsigned long my_slot = tr->tr_next;

	if (my_slot >= tr->tr_max)
		return 0;
	tr->tr_next++;
	return __get_tr_slot(tr, my_slot);
}

static inline void *get_trace_slot_overwrite_racy(struct trace_ring *tr)
{
	return __get_tr_slot_overwrite(tr, tr->tr_next++);
}
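
/* Usage sketch for the racy variants, which assume the caller already excludes
 * concurrent access (e.g., a per-core ring or interrupts disabled).  The event
 * struct and its fields are illustrative, as above:
 *
 * 	struct my_trace_event *ev = get_trace_slot_overwrite_racy(my_ring);
 *
 * 	ev->timestamp = read_tsc();	// or any timestamp source
 * 	ev->used = TRUE;
 */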