/* Copyright (c) 2010 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Multi-producer, multi-consumer queues. Designed initially for an untrusted
 * consumer. */

#include <ros/common.h>
#include <ros/bcq_struct.h>

/* Pain in the ass includes. Glibc has an atomic.h, and eventually userspace
 * will have to deal with the clobbering. */
#ifdef ROS_KERNEL
#include <atomic.h>
/* dequeue uses cpu_relax_vc, which is user-only. Some kernel tests call
 * dequeue. */
#define cpu_relax_vc(x) cpu_relax()
#else
#include <parlib/arch/atomic.h>
#include <parlib/vcore.h>
#endif /* ROS_KERNEL */

/* Bounded Concurrent Queues, untrusted consumer
 *
 * This is a producer/consumer circular buffer, in which the producer never
 * blocks and does not need to trust the data structure (which is writable by
 * the consumer).
 *
 * A producer enqueues an item, based on the indexes of the producer and
 * consumer. Enqueue cannot block, but can fail if the queue is full or if it
 * fails to enqueue after a certain number of tries.
 *
 * prod_idx: the next item to be produced
 * cons_pvt_idx: the next item a consumer can claim
 * cons_pub_idx: the oldest item (farthest left) that hasn't yet been
 *               consumed / made ready to be clobbered by the producer
 *               (advancing it is what the consumer "produces"). Once
 *               everything has been consumed, this will be the same as the
 *               prod_idx.
 *
 * The number of free slots in the buffer is: BufSize - (prod_idx - cons_pub)
 * The power-of-two nature of the number of elements makes this work when it
 * wraps around, just like with Xen. Check it yourself with a counter of 8 and
 * bufsizes of 8 and 4.
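 *
 * For instance, working the suggested numbers (a counter that wraps at 8 and
 * a bufsize of 4): say prod_idx has wrapped around to 1 while cons_pub is
 * still 7. Then (prod_idx - cons_pub) mod 8 == 2, so there are 4 - 2 == 2
 * free slots, which is right: two items are outstanding and two slots are
 * free.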
 *
 * Producers compete among themselves, using the prod_idx, to get a free spot.
 * Once a producer has a spot, it fills in the item and then toggles the
 * "ready for consumption" bool for the client. If a producer cannot get a
 * spot after a number of tries, it simply fails (this could be a DoS from the
 * client).
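 *
 * Roughly, the producer side looks like this (just a sketch; the real code is
 * the bcq_enqueue() macro below, and the failure handling here is
 * simplified):
 *
 *	do {
 *		if (failctr++ >= max_fails)
 *			return -1;			// too many tries, give up
 *		prod = bcq->hdr.prod_idx;
 *		if (BCQ_FULL(prod, bcq->hdr.cons_pub_idx, num_elems))
 *			return -1;			// no free slots
 *	} while (!atomic_cas_u32(&bcq->hdr.prod_idx, prod, prod + 1));
 *	bcq->wraps[prod & (num_elems - 1)].elem = *elem;	// the slot is ours
 *	bcq->wraps[prod & (num_elems - 1)].rdy_for_cons = TRUE;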
 *
 * Consumers compete using their private index (cons_pvt_idx), which
 * determines who is consuming which item. If there is an unconsumed item, a
 * consumer tries to advance the pvt counter. If it succeeds, it can consume
 * the item. The item might not be there yet, so the consumer must spin until
 * it is. Then the consumer copies the item out and clears the bool
 * (rdy_for_cons).
 *
 * At this point, the consumer needs to make sure the pub_idx is advanced far
 * enough that the producer knows the item is free. If pub_idx was its item,
 * the consumer moves it forward to the next item. If it was not, the consumer
 * currently spins and waits until the previous consumer finishes, and then
 * moves it forward. This isn't ideal, and we can deal with it in the future.
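 *
 * Roughly, the consumer side looks like this (again just a sketch; the real
 * code is the bcq_dequeue() macro below):
 *
 *	do {
 *		pvt = bcq->hdr.cons_pvt_idx;
 *		if (BCQ_NO_WORK(bcq->hdr.prod_idx, pvt))
 *			return -1;			// nothing left to claim
 *	} while (!atomic_cas_u32(&bcq->hdr.cons_pvt_idx, pvt, pvt + 1));
 *	while (!bcq->wraps[pvt & (num_elems - 1)].rdy_for_cons)
 *		cpu_relax();			// producer is still filling it in
 *	*elem = bcq->wraps[pvt & (num_elems - 1)].elem;
 *	bcq->wraps[pvt & (num_elems - 1)].rdy_for_cons = FALSE;
 *	while (bcq->hdr.cons_pub_idx != pvt)
 *		cpu_relax();			// wait for earlier consumers to finish
 *	bcq->hdr.cons_pub_idx = pvt + 1;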
 *
 * Enqueue will enqueue the item pointed to by elem. Dequeue will write an
 * item into the memory pointed to by elem.
 *
 * The number of items must be a power of two. In the future, we'll probably
 * use Xen's rounding macros. Not using powers of two is a pain, esp with mods
 * of negative numbers.
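 *
 * (The power-of-two size is also what makes the cheap "idx & (num_elems - 1)"
 * slot masking below valid. For example, with num_elems == 8, index 9 maps to
 * slot 9 & 7 == 1, exactly as if we had taken 9 % 8.)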
 *
 * Here's how to use it:
 *
 *	DEFINE_BCQ_TYPES(my_name, my_type, my_size);
 *	struct my_name_bcq some_bcq;
 *	bcq_init(&some_bcq, my_type, my_size);
 *
 *	bcq_enqueue(&some_bcq, &some_my_type, my_size, num_fails_okay);
 *	bcq_dequeue(&some_bcq, &some_my_type, my_size);
 *
 * They both return 0 on success, or some error code on failure.
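 *
 * A slightly more concrete sketch (all of the names below are made up for
 * illustration):
 *
 *	struct my_msg { int fd; int event; };
 *	DEFINE_BCQ_TYPES(msg, struct my_msg, 16);
 *	struct msg_bcq evq;
 *	bcq_init(&evq, struct my_msg, 16);
 *
 *	struct my_msg in = { .fd = 3, .event = 1 }, out;
 *	if (bcq_enqueue(&evq, &in, 16, 10))
 *		handle_full_queue();		// made-up fallback path
 *	if (bcq_dequeue(&evq, &out, 16))
 *		handle_empty_queue();		// made-up fallback path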
 *
 * Later, we could automatically round up the number of elements.
 *
 * Watch out for ABA. Could use counters in the top bits of the indexes. Not
 * really an issue, since that would require a full wraparound.
 *
 * Consumers could also just set their bit, and have whoever has the pub_idx
 * right before them be the one to advance it all the way up.
 *
 * Using uint32_t for now, since that's the comp_and_swap we have. We'll
 * probably get other sizes once we're sure we like the current one. */

#if 0 // Defined in the included header

struct bcq_header {
	uint32_t prod_idx;		/* next to be produced in */
	uint32_t cons_pub_idx;		/* last completely consumed */
	uint32_t cons_pvt_idx;		/* last a consumer has dibs on */
};

// This is there too:
#define DEFINE_BCQ_TYPES(__name, __elem_t, __num_elems)

#endif
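
/* For reference, the types that DEFINE_BCQ_TYPES generates presumably look
 * roughly like the sketch below. This is inferred from how the macros in this
 * file use them (hdr, wraps[].elem, wraps[].rdy_for_cons); see
 * ros/bcq_struct.h for the real definitions.
 *
 *	struct my_name_bcq_wrap {
 *		my_type elem;
 *		bool rdy_for_cons;	// set once the producer has filled elem
 *	};
 *	struct my_name_bcq {
 *		struct bcq_header hdr;
 *		struct my_name_bcq_wrap wraps[my_size];
 *	};
 */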

#define bcq_init(_bcq, _ele_type, _num_elems) \
({ \
	memset((_bcq), 0, sizeof(*(_bcq))); \
	assert((_num_elems) == ROUNDUPPWR2(_num_elems)); \
})

/* Num empty buffer slots in the BCQ */
#define BCQ_FREE_SLOTS(_p, _cp, _ne) ((_ne) - ((_p) - (_cp)))

/* Whether or not the BCQ is empty (all slots are free) */
#define BCQ_EMPTY(_p, _cp, _ne) ((_ne) == BCQ_FREE_SLOTS(_p, _cp, _ne))

/* All work claimed by a consumer */
#define BCQ_NO_WORK(_p, _cpv) ((_p) == (_cpv))

/* No free slots left for a producer */
#define BCQ_FULL(_p, _cp, _ne) (0 == BCQ_FREE_SLOTS(_p, _cp, _ne))

/* Figure out the slot you want, bail if it's full, or if you failed too many
 * times, CAS to set the new prod. Fill yours in, toggle the bool. Sorry, the
 * macro got a bit ugly, esp with the __retval hackery. */
#define bcq_enqueue(_bcq, _elem, _num_elems, _num_fail) \
	uint32_t __prod, __new_prod, __cons_pub, __failctr = 0; \
		if (((_num_fail)) && (__failctr++ >= (_num_fail))) { \
		__prod = (_bcq)->hdr.prod_idx; \
		__cons_pub = (_bcq)->hdr.cons_pub_idx; \
		if (BCQ_FULL(__prod, __cons_pub, (_num_elems))) { \
		__new_prod = __prod + 1; \
	} while (!atomic_cas_u32(&(_bcq)->hdr.prod_idx, __prod, __new_prod)); \
	/* from here out, __prod is the local __prod that we won */ \
	(_bcq)->wraps[__prod & ((_num_elems)-1)].elem = *(_elem); \
	(_bcq)->wraps[__prod & ((_num_elems)-1)].rdy_for_cons = TRUE; \

/* Similar to enqueue: spin afterwards til cons_pub is our element, then
 * advance it. */
#define bcq_dequeue(_bcq, _elem, _num_elems) \
	uint32_t __prod, __cons_pvt, __new_cons_pvt, __cons_pub; \
		__prod = (_bcq)->hdr.prod_idx; \
		__cons_pvt = (_bcq)->hdr.cons_pvt_idx; \
		if (BCQ_NO_WORK(__prod, __cons_pvt)) { \
		__new_cons_pvt = (__cons_pvt + 1); \
	} while (!atomic_cas_u32(&(_bcq)->hdr.cons_pvt_idx, __cons_pvt, \
	                         __new_cons_pvt)); \
	/* from here out, __cons_pvt is the local __cons_pvt that we */ \
	/* won. wait for the producer to finish copying it in */ \
	while (!(_bcq)->wraps[__cons_pvt & ((_num_elems)-1)].rdy_for_cons) \
		cpu_relax_vc(vcore_id()); \
	*(_elem) = (_bcq)->wraps[__cons_pvt & ((_num_elems)-1)].elem; \
	(_bcq)->wraps[__cons_pvt & ((_num_elems)-1)].rdy_for_cons = FALSE; \
	/* wait til we're the cons_pub, then advance it by one */ \
	while ((_bcq)->hdr.cons_pub_idx != __cons_pvt) \
		cpu_relax_vc(vcore_id()); \
	(_bcq)->hdr.cons_pub_idx = __cons_pvt + 1; \

/* Checks if a bcq is empty (meaning no work), instead of trying to dequeue */
#define bcq_empty(_bcq) \
	BCQ_NO_WORK((_bcq)->hdr.prod_idx, (_bcq)->hdr.cons_pvt_idx)

/* Number of items that have been produced but not yet fully consumed */
#define bcq_nr_full(_bcq) \
	((_bcq)->hdr.prod_idx - (_bcq)->hdr.cons_pub_idx)
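
/* Example: a consumer draining whatever is currently in the queue (a sketch,
 * reusing the made-up my_type/my_size names from the usage example above):
 *
 *	my_type item;
 *	while (!bcq_empty(&some_bcq)) {
 *		if (bcq_dequeue(&some_bcq, &item, my_size))
 *			break;		// raced with other consumers for the last items
 *		consume(&item);		// made-up handler
 *	}
 */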