Inlines spinlocks when not using SPINLOCK_DEBUG
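
With the debug bookkeeping gone from this arch file (akaros.git, kern/arch/i686/atomic.h), the kernel-wide lockers can wrap the double-underscore primitives directly and be inlined whenever SPINLOCK_DEBUG is off. A minimal sketch of how the generic header could pick between the two, reusing only names visible in this diff (the header/source split and the exact wrapper shape are assumptions, not the tree's actual code):

    #ifdef __CONFIG_SPINLOCK_DEBUG__
    /* debug builds: keep real (out-of-line) functions so the recorded
     * call site and calling core stay meaningful */
    void spin_lock(spinlock_t *lock);
    void spin_unlock(spinlock_t *lock);
    #else
    /* non-debug builds: nothing but the raw inlined acquire/release */
    static inline void spin_lock(spinlock_t *lock)
    {
            __spin_lock(lock);
    }

    static inline void spin_unlock(spinlock_t *lock)
    {
            __spin_unlock(lock);
    }
    #endif
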
diff --git a/kern/arch/i686/atomic.h b/kern/arch/i686/atomic.h
index 5bf2021..839cb3e 100644
--- a/kern/arch/i686/atomic.h
+++ b/kern/arch/i686/atomic.h
@@ -144,7 +144,7 @@ static inline bool spin_locked(spinlock_t *lock)
        return lock->rlock & 0xff;
 }
 
-static inline void __spin_lock(volatile uint32_t *rlock)
+static inline void __spin_lock_raw(volatile uint32_t *rlock)
 {
        asm volatile(
                        "1:                       "
@@ -158,37 +158,25 @@ static inline void __spin_lock(volatile uint32_t *rlock)
                        "       cmpb $0, %%al;        "
                        "       jne 1b;               "
                : : "m"(*rlock) : "eax", "cc");
+       cmb();  /* need cmb(), the CPU mb() was handled by the xchg */
 }
 
-static inline void spin_lock(spinlock_t *lock)
+static inline void __spin_lock(spinlock_t *lock)
 {
-       __spin_lock(&lock->rlock);
-#ifdef __CONFIG_SPINLOCK_DEBUG__
-       lock->call_site = (void RACY*CT(1))TC(read_eip());
-       lock->calling_core = core_id();
-#endif
+       __spin_lock_raw(&lock->rlock);
 }
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void __spin_unlock(spinlock_t *lock)
 {
-       /* Need to prevent the compiler (and some arches) from reordering older
-        * stores */
+       /* Need to prevent the compiler from reordering older stores. */
        wmb();
+       rwmb(); /* x86 makes both of these a cmb() */
        lock->rlock = 0;
 }
 
-static inline void spinlock_init(spinlock_t *lock)
-#ifdef __CONFIG_SPINLOCK_DEBUG__
-WRITES(lock->rlock,lock->call_site,lock->calling_core)
-#else
-WRITES(lock->rlock)
-#endif
+static inline void __spinlock_init(spinlock_t *lock)
 {
        lock->rlock = 0;
-#ifdef __CONFIG_SPINLOCK_DEBUG__
-       lock->call_site = 0;
-       lock->calling_core = 0;
-#endif
 }
 
 #endif /* ROS_KERN_ARCH_ATOMIC_H */
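
The barrier choices above lean on x86's strong ordering: the locked xchg in __spin_lock_raw is already a full CPU fence (hence only a cmb() after it), and on unlock neither prior stores nor prior loads can pass the later store of rlock, so wmb() and rwmb() only have to stop the compiler. A sketch of what the x86 barrier macros presumably reduce to (the real definitions live elsewhere in the arch headers; treat this as an assumption):

    /* compiler-only barrier: emits no instruction, just forbids the
     * compiler from moving memory accesses across this point */
    #define cmb()   asm volatile("" : : : "memory")

    /* x86 does not reorder stores with earlier stores, nor stores with
     * earlier loads, so both barriers degrade to compiler barriers */
    #define wmb()   cmb()
    #define rwmb()  cmb()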