Inlines spinlocks when not using SPINLOCK_DEBUG
[akaros.git] / kern / src / atomic.c
index 557899e..9a49bfc 100644 (file)
@@ -23,33 +23,40 @@ static void decrease_lock_depth(uint32_t coreid)
        per_cpu_info[coreid].lock_depth--;
 }
 
-/* TODO: make this inline if we aren't doing DEBUG? */
+#ifdef __CONFIG_SPINLOCK_DEBUG__
 void spin_lock(spinlock_t *lock)
 {
-#ifdef __CONFIG_SPINLOCK_DEBUG__
        uint32_t coreid = core_id();
+       struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+       /* TODO: don't print directly.  If we have a lock on the print path that
+        * fails, we'll recurse and/or deadlock */
+       if (lock->irq_okay) {
+               if (!can_spinwait_irq(pcpui)) {
+                       print_kctx_depths("IRQOK");
+                       panic("Lock %08p tried to spin when it shouldn't\n", lock);
+               }
+       } else {
+               if (!can_spinwait_noirq(pcpui)) {
+                       print_kctx_depths("NOIRQ");
+                       panic("Lock %08p tried to spin when it shouldn't\n", lock);
+               }
+       }
        __spin_lock(lock);
        lock->call_site = get_caller_pc();
        lock->calling_core = coreid;
        /* TODO consider merging this with __ctx_depth (unused field) */
        increase_lock_depth(lock->calling_core);
-#else
-       __spin_lock(lock);
-#endif
-       cmb();  /* need cmb(), the CPU mb() was handled by the arch-specific xchg */
+       /* Memory barriers are handled by the particular arches */
 }
 
 void spin_unlock(spinlock_t *lock)
 {
-#ifdef __CONFIG_SPINLOCK_DEBUG__
        decrease_lock_depth(lock->calling_core);
-#endif
-       /* Need to prevent the compiler (and some arches) from reordering older
-        * stores. */
-       wmb();
-       rwmb(); /* x86 makes both of these a cmb() */
+       /* Memory barriers are handled by the particular arches */
        __spin_unlock(lock);
 }
+#endif /* __CONFIG_SPINLOCK_DEBUG__ */
 
 /* Inits a hashlock. */
 void hashlock_init(struct hashlock *hl, unsigned int nr_entries)
@@ -246,7 +253,7 @@ void down_checklist(checklist_t* list)
 /* Barriers */
 void init_barrier(barrier_t* barrier, uint32_t count)
 {
-       spinlock_init(&barrier->lock);
+       spinlock_init_irqsave(&barrier->lock);
        barrier->init_count = count;
        barrier->current_count = count;
        barrier->ready = 0;