diff --git a/user/parlib/spinlock.c b/user/parlib/spinlock.c
index 9ca395d..a833ab8 100644
--- a/user/parlib/spinlock.c
+++ b/user/parlib/spinlock.c
 
 void spinlock_init(spinlock_t *lock)
 {
-       lock->lock = 0;
+       lock->locked = FALSE;
 }
 
-int spinlock_trylock(spinlock_t *lock) 
+/* Returns TRUE if we grabbed the lock */
+bool spinlock_trylock(spinlock_t *lock)
 {
-       if (lock->lock)
-               return EBUSY;
-       return __sync_lock_test_and_set(&lock->lock, EBUSY);
+       if (lock->locked)
+               return FALSE;
+       return !__sync_lock_test_and_set(&lock->locked, TRUE);
 }
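For illustration, a caller under the new boolean convention might look like the sketch below (not part of the diff; do_work() and do_other_work() are hypothetical):

void example(spinlock_t *lock)
{
	if (spinlock_trylock(lock)) {
		/* TRUE: we hold the lock */
		do_work();
		spinlock_unlock(lock);
	} else {
		/* FALSE: lock was busy; do not unlock */
		do_other_work();
	}
}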
 
 void spinlock_lock(spinlock_t *lock) 
 {
-       while (spinlock_trylock(lock))
+       while (!spinlock_trylock(lock))
                cpu_relax();
 }
 
 void spinlock_unlock(spinlock_t *lock) 
 {
-       __sync_lock_release(&lock->lock, 0);
+       __sync_lock_release(&lock->locked, FALSE);
 }
 
 bool spinlock_locked(spinlock_t *lock)
 {
-       return lock->lock != 0;
+       return lock->locked;
 }
 
 /* Spin-PDR locks (preemption detection/recovery).  Idea is to CAS and put the
  * lockholder's vcoreid in the lock, and all spinners ensure that vcore runs. */
 void spin_pdr_init(struct spin_pdr_lock *pdr_lock)
 {
+       /* See glibc-2.19-akaros/sysdeps/akaros/lowlevellock.h for details. */
+       parlib_static_assert(sizeof(struct spin_pdr_lock) == sizeof(int));
        pdr_lock->lock = SPINPDR_UNLOCKED;
 }
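Concretely, the spin side of the idea in the comment above would look roughly like this (a sketch, not part of this diff; it assumes parlib's ensure_vcore_runs()):

/* Spin until the lock reads UNLOCKED, making sure the current holder's vcore
 * is running, then CAS our own vcoreid into the lock word. */
static void spin_pdr_lock_sketch(struct spin_pdr_lock *pdr_lock)
{
	uint32_t lock_val;

	do {
		while ((lock_val = ACCESS_ONCE(pdr_lock->lock)) != SPINPDR_UNLOCKED) {
			/* The lock word holds the lockholder's vcoreid. */
			ensure_vcore_runs(lock_val);
			cpu_relax();
		}
	} while (!atomic_cas_u32(&pdr_lock->lock, SPINPDR_UNLOCKED, vcore_id()));
}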
 
@@ -87,3 +90,21 @@ void spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
        /* Enable notifs, if we're an _M uthread */
        uth_enable_notifs();
 }
+
+bool spin_pdr_trylock(struct spin_pdr_lock *pdr_lock)
+{
+       uint32_t lock_val;
+
+       uth_disable_notifs();
+       lock_val = ACCESS_ONCE(pdr_lock->lock);
+       if (lock_val != SPINPDR_UNLOCKED) {
+               uth_enable_notifs();
+               return FALSE;
+       }
+       if (atomic_cas_u32(&pdr_lock->lock, lock_val, vcore_id())) {
+               return TRUE;
+       } else {
+               uth_enable_notifs();
+               return FALSE;
+       }
+}
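A successful spin_pdr_trylock() returns with notifs still disabled; they stay off until the matching spin_pdr_unlock().  A minimal caller sketch (example_lock and the critical section are hypothetical):

struct spin_pdr_lock example_lock;	/* spin_pdr_init() it during setup */

void example_update(void)
{
	if (spin_pdr_trylock(&example_lock)) {
		/* Notifs are disabled here; keep the critical section short. */
		/* ... critical section ... */
		spin_pdr_unlock(&example_lock);	/* re-enables notifs */
	}
}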