Updated memory barrier stuff
[akaros.git] / user / parlib / mcs.c
index e031491..d06e361 100644 (file)
@@ -3,49 +3,89 @@
 #include <arch/atomic.h>
 #include <string.h>
 #include <stdlib.h>
+#include <uthread.h>
 
 // MCS locks
-void mcs_lock_init(mcs_lock_t* lock)
+void mcs_lock_init(struct mcs_lock *lock)
 {
        memset(lock,0,sizeof(mcs_lock_t));
 }
 
-static inline mcs_lock_qnode_t* mcs_qnode_swap(mcs_lock_qnode_t** addr, mcs_lock_qnode_t* val)
+static inline mcs_lock_qnode_t *mcs_qnode_swap(mcs_lock_qnode_t **addr,
+                                               mcs_lock_qnode_t *val)
 {
-       return (mcs_lock_qnode_t*)atomic_swap((int*)addr,(int)val);
+       return (mcs_lock_qnode_t*)atomic_swap_ptr((void**)addr, val);
 }
 
/* Acquire the MCS lock.  The caller supplies its own qnode, which is appended
 * to the lock's queue with an atomic swap.  If a predecessor exists, we link
 * ourselves behind it and spin locally on our own qnode->locked flag until the
 * predecessor's unlock hands the lock off to us.  The barrier comments below
 * rely on the atomic swap providing a full CPU memory barrier — TODO confirm
 * that guarantee for atomic_swap_ptr() on all supported arches. */
void mcs_lock_lock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
	/* Clear our next pointer before we become visible as the tail. */
	qnode->next = 0;
	cmb();	/* swap provides a CPU mb() */
	mcs_lock_qnode_t *predecessor = mcs_qnode_swap(&lock->lock, qnode);
	if (predecessor) {
		/* Mark ourselves waiting BEFORE linking behind the predecessor,
		 * so its unlock cannot see us with locked still clear. */
		qnode->locked = 1;
		wmb();
		predecessor->next = qnode;
		/* no need for a wrmb(), since this will only get unlocked after they
		 * read our previous write */
		while (qnode->locked)
			cpu_relax();
	}
	cmb();	/* just need a cmb, the swap handles the CPU wmb/wrmb() */
}
 
/* Release the MCS lock.  Fast path: a successor is already linked via
 * qnode->next, so we just clear its locked flag (with explicit barriers,
 * since no atomic swap is involved on that path).  Slow path: no successor
 * is visible, so we try to swap ourselves out as the tail; if someone else
 * became the tail in the meantime (a "usurper"), we must stitch the queue
 * back together before handing off. */
void mcs_lock_unlock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
	if (qnode->next == 0) {
		cmb();	/* no need for CPU mbs, since there's an atomic_swap() */
		/* Try to detach ourselves as the tail.  If we were still the
		 * tail, the queue is now empty and we are done. */
		mcs_lock_qnode_t *old_tail = mcs_qnode_swap(&lock->lock,0);
		if (old_tail == qnode)
			return;
		/* Someone enqueued between our next-check and the swap: put the
		 * old tail back, then wait for our successor to finish linking
		 * itself behind us (its predecessor->next store). */
		mcs_lock_qnode_t *usurper = mcs_qnode_swap(&lock->lock,old_tail);
		while (qnode->next == 0)
			cpu_relax();
		if (usurper)
			/* A usurper swapped in while the lock was briefly 0:
			 * splice our successor chain behind it. */
			usurper->next = qnode->next;
		else
			/* No usurper: hand the lock directly to our successor. */
			qnode->next->locked = 0;
	} else {
		/* mb()s necessary since we didn't call an atomic_swap() */
		wmb();	/* need to make sure any previous writes don't pass unlocking */
		rwmb();	/* need to make sure any reads happen before the unlocking */
		qnode->next->locked = 0;
	}
}
+
/* We don't bother saving the state, like we do with irqsave, since we can use
 * whether or not we are in vcore context to determine that.  This means you
 * shouldn't call this from those moments when you fake being in vcore context
 * (when switching into the TLS, etc). */
/* Notification-safe lock acquire: when called from uthread context, pin the
 * current uthread (DONT_MIGRATE) while disabling notifications on this vcore,
 * so the vcore_id() used for disable_notifs() stays valid across the call. */
void mcs_lock_notifsafe(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
	if (!in_vcore_context()) {
		if (current_uthread)
			current_uthread->flags |= UTHREAD_DONT_MIGRATE;
		cmb();	/* don't issue the flag write before the vcore_id() read */
		disable_notifs(vcore_id());
		cmb();	/* don't issue the flag write before the disable */
		if (current_uthread)
			current_uthread->flags &= ~UTHREAD_DONT_MIGRATE;
	}
	mcs_lock_lock(lock, qnode);
}
+
/* Notification-safe unlock: release the lock first, then — if we are a
 * uthread — re-enable notifications on this vcore, pinning the uthread
 * (DONT_MIGRATE) around the vcore_id()/enable_notifs() pair so the vcore id
 * cannot change underneath us.
 * NOTE(review): this path additionally checks in_multi_mode() while the lock
 * path in mcs_lock_notifsafe() does not — presumably notifs are never
 * disabled outside multi mode, so re-enabling is skipped; confirm this
 * asymmetry is intentional. */
void mcs_unlock_notifsafe(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
	mcs_lock_unlock(lock, qnode);
	if (!in_vcore_context() && in_multi_mode()) {
		if (current_uthread)
			current_uthread->flags |= UTHREAD_DONT_MIGRATE;
		cmb();	/* don't issue the flag write before the vcore_id() read */
		enable_notifs(vcore_id());
		cmb();	/* don't issue the flag write before the enable */
		if (current_uthread)
			current_uthread->flags &= ~UTHREAD_DONT_MIGRATE;
	}
}
 
 // MCS dissemination barrier!