spin_trylock()
author		Barret Rhoden <brho@cs.berkeley.edu>
		Tue, 24 Sep 2013 23:11:23 +0000 (16:11 -0700)
committer	Barret Rhoden <brho@cs.berkeley.edu>
		Thu, 16 Jan 2014 02:11:36 +0000 (18:11 -0800)
When lock checking is enabled, trylock won't check for IRQs, so we can try to grab a non-irqsave lock from IRQ context.
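
A hedged usage sketch of the call site this enables: trying a non-irqsave lock from IRQ context, where spin_lock() with lock checking on would complain.  The handler, lock, and counter names are hypothetical, and it assumes spin_trylock() returns TRUE when the lock was acquired and that SPINLOCK_INITIALIZER is the usual initializer.

#include <atomic.h>

/* Hypothetical non-irqsave lock protecting a statistics counter. */
static spinlock_t stats_lock = SPINLOCK_INITIALIZER;
static unsigned long nr_irq_events;

/* Hypothetical IRQ-context path: spin_lock() would trip the lock checker's
 * IRQ check for a non-irqsave lock, but spin_trylock() skips that check.
 * If the lock is busy, drop the update instead of spinning. */
static void count_irq_event(void)
{
	if (spin_trylock(&stats_lock)) {	/* assumed: TRUE == acquired */
		nr_irq_events++;
		spin_unlock(&stats_lock);
	}
}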

kern/arch/riscv/atomic.h
kern/arch/x86/atomic.h
kern/include/atomic.h
kern/src/atomic.c

kern/arch/riscv/atomic.h
index 99a4e58..8ba30a6 100644
@@ -129,7 +129,7 @@ static inline bool spin_locked(spinlock_t* lock)
        return lock->rlock;
 }
 
-static inline uint32_t spin_trylock(spinlock_t* lock)
+static inline bool __spin_trylock(spinlock_t *lock)
 {
        return __sync_fetch_and_or(&lock->rlock, 1);
 }
@@ -140,7 +140,7 @@ static inline void __spin_lock(spinlock_t *lock)
        {
                while (lock->rlock)
                        ;
-       } while (spin_trylock(lock));
+       } while (__spin_trylock(lock));
        mb();
 }
 
kern/arch/x86/atomic.h
index 687421a..29a11e1 100644
@@ -147,6 +147,13 @@ static inline void __spin_lock(spinlock_t *lock)
        __spin_lock_raw(&lock->rlock);
 }
 
+static inline bool __spin_trylock(spinlock_t *lock)
+{
+       /* since this is an or, we're not going to clobber the top bytes (if that
+        * matters) */
+       return __sync_fetch_and_or(&lock->rlock, 1);
+}
+
 static inline void __spin_unlock(spinlock_t *lock)
 {
        /* Need to prevent the compiler from reordering older stores. */
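
For reference, __sync_fetch_and_or() is the GCC atomic builtin used above; it ORs the argument into the target and returns the *previous* value, so a zero return means the lock bit was clear and the caller is the one who just set it.  A standalone user-space sketch of that return behavior, separate from the kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rlock = 0;

	/* First attempt: the previous value was 0, so this caller set the
	 * bit and effectively acquired the lock. */
	uint32_t prev = __sync_fetch_and_or(&rlock, 1);
	printf("first attempt: previous value %u\n", (unsigned)prev);	/* 0 */

	/* Second attempt: the previous value was 1, so the lock was already
	 * held and this attempt did not acquire it. */
	prev = __sync_fetch_and_or(&rlock, 1);
	printf("second attempt: previous value %u\n", (unsigned)prev);	/* 1 */

	return 0;
}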
kern/include/atomic.h
index 2c6919b..dbe2c68 100644
@@ -70,6 +70,7 @@ extern inline void __spin_unlock(spinlock_t *lock);
 #ifdef CONFIG_SPINLOCK_DEBUG
 /* Arch indep, in k/s/atomic.c */
 void spin_lock(spinlock_t *lock);
+bool spin_trylock(spinlock_t *lock);
 void spin_unlock(spinlock_t *lock);
 void spinlock_debug(spinlock_t *lock);
 
@@ -80,6 +81,11 @@ static inline void spin_lock(spinlock_t *lock)
        __spin_lock(lock);
 }
 
+static inline bool spin_trylock(spinlock_t *lock)
+{
+       return __spin_trylock(lock);
+}
+
 static inline void spin_unlock(spinlock_t *lock)
 {
        __spin_unlock(lock);
kern/src/atomic.c
index ee7a4d4..10d7e25 100644
@@ -42,6 +42,18 @@ static bool can_trace(spinlock_t *lock)
        return TRUE;
 }
 
+/* spinlock and trylock call this after locking */
+static void post_lock(spinlock_t *lock, uint32_t coreid)
+{
+       struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+       if ((pcpui->__lock_checking_enabled == 1) && can_trace(lock))
+               pcpui_trace_locks(pcpui, lock);
+       lock->call_site = get_caller_pc();
+       lock->calling_core = coreid;
+       /* TODO consider merging this with __ctx_depth (unused field) */
+       increase_lock_depth(lock->calling_core);
+}
+
 void spin_lock(spinlock_t *lock)
 {
        uint32_t coreid = core_id_early();
@@ -68,13 +80,19 @@ void spin_lock(spinlock_t *lock)
        }
 lock:
        __spin_lock(lock);
-       if (can_trace(lock))
-               pcpui_trace_locks(pcpui, lock);
-       lock->call_site = get_caller_pc();
-       lock->calling_core = coreid;
-       /* TODO consider merging this with __ctx_depth (unused field) */
-       increase_lock_depth(lock->calling_core);
        /* Memory barriers are handled by the particular arches */
+       post_lock(lock, coreid);
+}
+
+/* Trylock doesn't check for irq/noirq, in case we want to try and lock a
+ * non-irqsave lock from irq context. */
+bool spin_trylock(spinlock_t *lock)
+{
+       uint32_t coreid = core_id_early();
+       bool ret = __spin_trylock(lock);
+       if (ret)
+               post_lock(lock, coreid);
+       return ret;
 }
 
 void spin_unlock(spinlock_t *lock)
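
Under CONFIG_SPINLOCK_DEBUG, a successful trylock now runs the same post_lock() bookkeeping as spin_lock(), so the lock records the call site and owning core.  A hedged sketch of inspecting that state with spinlock_debug() (declared in kern/include/atomic.h above); the lock name is hypothetical, it assumes TRUE means acquired, and the report format is left to spinlock_debug() itself.

#include <atomic.h>

static spinlock_t foo_lock = SPINLOCK_INITIALIZER;	/* hypothetical lock */

static void try_and_report(void)
{
	if (spin_trylock(&foo_lock)) {	/* assumed: TRUE == acquired */
		/* post_lock() recorded our PC in foo_lock.call_site and our
		 * core in foo_lock.calling_core; spinlock_debug() reports them. */
		spinlock_debug(&foo_lock);
		spin_unlock(&foo_lock);
	}
}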