/* Copyright (c) 2012 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Post work and poke synchronization.  This is a wait-free way to make sure
 * some code is run, usually by the calling core, but potentially by any core.
 * Under contention, everyone just posts work, and one core will carry out the
 * work.  Callers post work (the meaning of which is particular to their
 * subsystem), then call this function.  The function is not run concurrently
 * with itself.
 *
 * As far as uthreads, vcores, and preemption go, poking is safe in uthread
 * context and if preemptions occur.  However, a uthread running the poke
 * function that gets preempted could delay the execution of the poke
 * indefinitely.  In general, post-and-poke does not provide any guarantee about
 * *when* the poke finally occurs.  If delays of this sort are a problem, then
 * run poke() from vcore context.
 *
 * Adapted from the kernel's implementation. */
#include <parlib/poke.h>
#include <parlib/arch/atomic.h>
#include <parlib/assert.h>
25 /* This is the 'post (work) and poke' style of sync. We make sure the poke
26 * tracker's function runs. Once this returns, the func either has run or is
27 * currently running (in case someone else is running now). We won't wait or
28 * spin or anything, and it is safe to call this recursively (deeper in the
31 * It's up to the caller to somehow post its work. We'll also pass arg to the
32 * func, ONLY IF the caller is the one to execute it - so there's no guarantee
33 * the func(specific_arg) combo will actually run. It's more for info
34 * purposes/optimizations/etc. If no one uses it, I'll get rid of it. */
35 void poke(struct poke_tracker *tracker, void *arg)
37 atomic_set(&tracker->need_to_run, TRUE);
38 /* will need to repeatedly do it if someone keeps posting work */
40 /* want an wrmb() btw posting work/need_to_run and in_progress. the
41 * swap provides the HW mb. just need a cmb, which we do in the loop to
42 * cover the iterations (even though i can't imagine the compiler
43 * reordering the check it needed to do for the branch).. */
45 /* poke / make sure someone does it. if we get a TRUE (1) back, someone
46 * is already running and will deal with the posted work. (probably on
47 * their next loop). if we got a 0 back, we won the race and have the
49 if (atomic_swap(&tracker->run_in_progress, TRUE))
51 /* if we're here, then we're the one who needs to run the func. */
52 /* clear the 'need to run', since we're running it now. new users will
53 * set it again. this write needs to be wmb()'d after in_progress. the
54 * swap provided the HW mb(). */
56 atomic_set(&tracker->need_to_run, FALSE); /* no internal HW mb */
57 /* run the actual function. the poke sync makes sure only one caller is
58 * in that func at a time. */
59 assert(tracker->func);
61 wmb(); /* ensure the in_prog write comes after the run_again. */
62 atomic_set(&tracker->run_in_progress, FALSE); /* no internal HW mb */
63 /* in_prog write must come before run_again read */
65 } while (atomic_read(&tracker->need_to_run)); /* while there's more work*/