Fixes some CAS loops
author Barret Rhoden <brho@cs.berkeley.edu>
Thu, 19 May 2011 21:09:36 +0000 (14:09 -0700)
committer Kevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:36:03 +0000 (17:36 -0700)
The old loops were caching sysc->flags in a register.

For the uthread code, those should have been atomic reads, which force
a re-read on every pass through the loop.  I added the cmb()s in case
the code changes in the future.  In general, comp_swap technically
doesn't need "memory", and any loop using it ought to provide its own
cmb()s.  Still, I put the clobber in CAS just to avoid future bugs.
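
To illustrate the failure mode, here is a minimal sketch (not code
from this commit; SC_UEVENT's value and the function names are
placeholders, and cmb() is assumed to be a pure compiler barrier):

#include <stdint.h>
#include <stdbool.h>

#define cmb() asm volatile("" ::: "memory")
#define SC_UEVENT 0x004			/* placeholder flag value */

/* Pre-fix i686 CAS: no "memory" clobber, so GCC does not treat it as
 * a compiler barrier and may keep memory values cached across it. */
static inline bool comp_swap(uint32_t *addr, uint32_t exp_val,
                             uint32_t new_val)
{
	asm volatile("lock cmpxchgl %4,%1; movl $0,%%eax; sete %%al"
	             : "=a"(exp_val), "=m"(*addr)
	             : "m"(*addr), "a"(exp_val), "r"(new_val)
	             : "cc");
	return exp_val;
}

/* Buggy: the compiler may load *flags once, keep it in a register,
 * and retry the CAS forever against a stale old_flags. */
void set_uevent_buggy(uint32_t *flags)
{
	uint32_t old_flags;
	do {
		old_flags = *flags;	/* may be hoisted out of the loop */
	} while (!comp_swap(flags, old_flags, old_flags | SC_UEVENT));
}

/* Fixed: cmb() forbids carrying cached memory values across the loop
 * edge, and the volatile cast (playing the role of atomic_read())
 * forces a fresh load of *flags on every iteration. */
void set_uevent_fixed(uint32_t *flags)
{
	uint32_t old_flags;
	do {
		cmb();
		old_flags = *(volatile uint32_t *)flags;
	} while (!comp_swap(flags, old_flags, old_flags | SC_UEVENT));
}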

kern/arch/i686/atomic.h
kern/include/ros/bcq.h
user/parlib/include/i686/atomic.h
user/parlib/uthread.c

kern/arch/i686/atomic.h
index 5517203..fae5c9b 100644
@@ -128,7 +128,7 @@ static inline bool atomic_comp_swap(uint32_t *addr, uint32_t exp_val,
        asm volatile("lock cmpxchgl %4,%1; movl $0,%%eax; sete %%al"
                     : "=a"(exp_val), "=m"(*addr)
                     : "m"(*addr), "a"(exp_val), "r"(new_val)
-                    : "cc");
+                    : "cc", "memory");
        return exp_val;
 }
 
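
(For reference, a sketch of what the added clobber buys, using a
hypothetical caller: with "memory" in the clobber list, GCC must
assume the asm can read or write any memory, so cached values are
reloaded after the CAS and a loop like this needs no explicit cmb().)

#include <stdint.h>
#include <stdbool.h>

static inline bool cas_barrier(uint32_t *addr, uint32_t exp_val,
                               uint32_t new_val)
{
	asm volatile("lock cmpxchgl %4,%1; movl $0,%%eax; sete %%al"
	             : "=a"(exp_val), "=m"(*addr)
	             : "m"(*addr), "a"(exp_val), "r"(new_val)
	             : "cc", "memory");	/* also a compiler barrier */
	return exp_val;
}

void increment(uint32_t *val)
{
	uint32_t old;
	do {
		old = *val;	/* reloaded each pass, thanks to the clobber */
	} while (!cas_barrier(val, old, old + 1));
}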
kern/include/ros/bcq.h
index b8a7f76..53761eb 100644
@@ -127,6 +127,7 @@ struct bcq_header {
        uint32_t __prod, __new_prod, __cons_pub, __failctr = 0;                    \
        int __retval = 0;                                                          \
        do {                                                                       \
+               cmb();                                                                 \
                if (((_num_fail)) && (__failctr++ >= (_num_fail))) {                   \
                        __retval = -EFAIL;                                                 \
                        break;                                                             \
@@ -154,6 +155,7 @@ struct bcq_header {
        uint32_t __prod, __cons_pvt, __new_cons_pvt, __cons_pub;                   \
        int __retval = 0;                                                          \
        do {                                                                       \
+               cmb();                                                                 \
                __prod = (_bcq)->hdr.prod_idx;                                         \
                __cons_pvt = (_bcq)->hdr.cons_pvt_idx;                                 \
                if (BCQ_NO_WORK(__prod, __cons_pvt)) {                                 \
user/parlib/include/i686/atomic.h
index 00f7fcc..f7358a2 100644
@@ -62,7 +62,7 @@ static inline bool atomic_comp_swap(uint32_t *addr, uint32_t exp_val,
        asm volatile("lock cmpxchgl %4,%1; movl $0,%%eax; sete %%al"
                     : "=a"(exp_val), "=m"(*addr)
                     : "m"(*addr), "a"(exp_val), "r"(new_val)
-                    : "cc");
+                    : "cc", "memory");
        return exp_val;
 }
 
user/parlib/uthread.c
index 0161384..b109b4a 100644
@@ -330,7 +330,8 @@ bool register_evq(struct syscall *sysc, struct event_queue *ev_q)
        wmb();
        /* Try and set the SC_UEVENT flag (so the kernel knows to look at ev_q) */
        do {
-               old_flags = sysc->flags;
+               cmb();
+               old_flags = atomic_read(&sysc->flags);
                /* If the kernel finishes while we are trying to sign up for an event,
                 * we need to bail out */
                if (old_flags & (SC_DONE | SC_PROGRESS)) {
@@ -353,7 +354,8 @@ void deregister_sysc(struct syscall *sysc)
        int old_flags;
        /* Try and unset the SC_UEVENT flag */
        do {
-               old_flags = sysc->flags;
+               cmb();
+               old_flags = atomic_read(&sysc->flags);
                /* Note we don't care if the SC_DONE flag is getting set.  We just need
                 * to avoid clobbering flags */
        } while (!atomic_comp_swap(&sysc->flags, old_flags, old_flags & ~SC_UEVENT));
@@ -367,7 +369,8 @@ bool reregister_sysc(struct syscall *sysc)
        int old_flags;
        /* Try and set the SC_UEVENT flag (so the kernel knows to look at ev_q) */
        do {
-               old_flags = sysc->flags;
+               cmb();
+               old_flags = atomic_read(&sysc->flags);
                /* If the kernel finishes while we are trying to sign up for an event,
                 * we need to bail out */
                if (old_flags & (SC_DONE | SC_PROGRESS)) {