Added perfmon interrupt handling to allow overflow-based profiling
[akaros.git] / kern / arch / x86 / atomic.h
index 29a11e1..2aebdba 100644 (file)
@@ -4,8 +4,7 @@
  *
  * x86 atomics and locking functions. */
 
-#ifndef ROS_KERN_ARCH_ATOMIC_H
-#define ROS_KERN_ARCH_ATOMIC_H
+#pragma once
 
 #include <ros/common.h>
 #include <ros/arch/membar.h>
@@ -127,6 +126,7 @@ static inline bool spin_locked(spinlock_t *lock)
 
 static inline void __spin_lock_raw(volatile uint32_t *rlock)
 {
+       uint8_t dicks = 0;      /* scratch byte for the xchg; replaces the old %eax clobber */
        asm volatile(
                        "1:                       "
                        "       cmpb $0, %0;          "
@@ -134,11 +134,11 @@ static inline void __spin_lock_raw(volatile uint32_t *rlock)
                        "       pause;                "
                        "       jmp 1b;               "
                        "2:                       " 
-                       "       movb $1, %%al;        "
-                       "       xchgb %%al, %0;       "
-                       "       cmpb $0, %%al;        "
+                       "       movb $1, %1;          "
+                       "       xchgb %1, %0;         "
+                       "       cmpb $0, %1;          "
                        "       jne 1b;               "
-               : : "m"(*rlock) : "eax", "cc");
+               : "+m"(*rlock), "+r"(dicks) : : "cc");
        cmb();  /* need cmb(), the CPU mb() was handled by the xchg */
 }
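
Note on the change above: the lock-grabbing xchgb now goes through an explicit scratch operand ("+r", since the asm writes it) instead of hard-coding %al and clobbering "eax", letting the compiler pick any byte register. A minimal sketch of the same acquire pattern using GCC's __sync builtins (spin_lock_sketch is hypothetical, not Akaros code; __sync_lock_test_and_set compiles to xchg on x86, the same full-barrier grab as the xchgb above):

        /* Sketch only: same test-and-test-and-set pattern as the asm loop.
         * On little-endian x86 the byte cast hits the low byte of rlock,
         * matching what cmpb/xchgb on the memory operand touch. */
        static inline void spin_lock_sketch(volatile uint32_t *rlock)
        {
                volatile uint8_t *lock_byte = (volatile uint8_t*)rlock;

                while (__sync_lock_test_and_set(lock_byte, 1)) {
                        /* read-only spin while held: the cmpb/pause loop */
                        while (*lock_byte)
                                asm volatile("pause");
                }
        }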
 
@@ -151,7 +151,7 @@ static inline bool __spin_trylock(spinlock_t *lock)
 {
        /* since this is an or, we're not going to clobber the top bytes (if that
         * matters) */
-       return __sync_fetch_and_or(&lock->rlock, 1);
+       return !__sync_fetch_and_or(&lock->rlock, 1);
 }
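
Note on the fix above: __sync_fetch_and_or() returns the old value of rlock, so the code previously returned "true" exactly when the lock was already held. With the negation, a zero old value (the lock was free, and the or just took it) now reads as success. A hypothetical caller, using only helpers defined in this header:

        spinlock_t lock;

        __spinlock_init(&lock);
        if (__spin_trylock(&lock)) {
                /* fetch_and_or returned 0: the lock was free, we hold it */
                __spin_unlock(&lock);
        } else {
                /* contended: someone else already had the low bit set */
        }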
 
 static inline void __spin_unlock(spinlock_t *lock)
@@ -166,5 +166,3 @@ static inline void __spinlock_init(spinlock_t *lock)
 {
        lock->rlock = 0;
 }
-
-#endif /* ROS_KERN_ARCH_ATOMIC_H */
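
Taken together, the header keeps the same init/lock/trylock/unlock surface over a single byte of lock state; only the guard style, the asm operands, and the trylock return value changed. A sketch of the blocking path (lock_unlock_sketch is hypothetical, and assumes the kernel's assert() is available):

        static void lock_unlock_sketch(void)
        {
                spinlock_t lock;

                __spinlock_init(&lock);         /* rlock = 0: unlocked */
                __spin_lock_raw(&lock.rlock);   /* spins until the byte flips 0 -> 1 */
                assert(spin_locked(&lock));     /* low byte is now nonzero */
                __spin_unlock(&lock);
        }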