Set errno when only errstr was called
diff --git a/kern/src/atomic.c b/kern/src/atomic.c
index 24448bb..f019e03 100644
--- a/kern/src/atomic.c
+++ b/kern/src/atomic.c
@@ -12,6 +12,8 @@
 #include <assert.h>
 #include <hashtable.h>
 #include <smp.h>
+#include <kmalloc.h>
+#include <kdebug.h>
 
 static void increase_lock_depth(uint32_t coreid)
 {
@@ -23,30 +25,32 @@ static void decrease_lock_depth(uint32_t coreid)
        per_cpu_info[coreid].lock_depth--;
 }
 
-#ifdef __CONFIG_SPINLOCK_DEBUG__
+#ifdef CONFIG_SPINLOCK_DEBUG
 void spin_lock(spinlock_t *lock)
 {
-       uint32_t coreid = core_id();
+       uint32_t coreid = core_id_early();
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];
        /* Short circuit our lock checking, so we can print or do other things to
-        * announce the failure that require locks. */
-       if (pcpui->__lock_depth_disabled)
+        * announce the failure that require locks.  Also avoids anything else
+        * requiring pcpui initialization. */
+       if (pcpui->__lock_checking_enabled != 1)
                goto lock;
        if (lock->irq_okay) {
                if (!can_spinwait_irq(pcpui)) {
-                       pcpui->__lock_depth_disabled++;
+                       pcpui->__lock_checking_enabled--;
                        print_kctx_depths("IRQOK");
                        panic("Lock %p tried to spin when it shouldn't\n", lock);
-                       pcpui->__lock_depth_disabled--;
+                       pcpui->__lock_checking_enabled++;
                }
        } else {
                if (!can_spinwait_noirq(pcpui)) {
-                       pcpui->__lock_depth_disabled++;
+                       pcpui->__lock_checking_enabled--;
                        print_kctx_depths("NOIRQ");
                        panic("Lock %p tried to spin when it shouldn't\n", lock);
-                       pcpui->__lock_depth_disabled--;
+                       pcpui->__lock_checking_enabled++;
                }
        }
+       pcpui_trace_locks(pcpui, lock);
 lock:
        __spin_lock(lock);
        lock->call_site = get_caller_pc();
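
The rename from __lock_depth_disabled to __lock_checking_enabled inverts the flag's sense: the checks run only while the per-cpu counter is exactly 1, so both the early-boot case (before pcpui is fully set up) and the failure-reporting case (where the counter is decremented before printing) fall through to the plain acquisition at the lock: label. That is also why core_id_early() replaces core_id(): spin_lock() must be callable before per-cpu data is initialized. A minimal standalone sketch of the counter-as-gate pattern (hypothetical names, single-threaded for illustration, not Akaros code):

#include <stdio.h>
#include <stdlib.h>

/* 1 means "checks active"; report paths decrement it so any locks they
 * take while printing skip the check instead of recursing. */
static int lock_checking_enabled = 1;

static void report_bad_lock(void *lock)
{
	lock_checking_enabled--;	/* printing may itself grab locks */
	fprintf(stderr, "Lock %p tried to spin when it shouldn't\n", lock);
	abort();
}

static void checked_lock(void *lock, int spinwait_ok)
{
	if (lock_checking_enabled != 1)
		goto lock;
	if (!spinwait_ok)
		report_bad_lock(lock);
lock:
	/* real code would do the actual acquisition here */
	(void)lock;
}

int main(void)
{
	int dummy;

	checked_lock(&dummy, 1);	/* checks pass, lock proceeds */
	checked_lock(&dummy, 0);	/* reports the bad spin and aborts */
	return 0;
}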
@@ -62,7 +66,23 @@ void spin_unlock(spinlock_t *lock)
        /* Memory barriers are handled by the particular arches */
        __spin_unlock(lock);
 }
-#endif /* __CONFIG_SPINLOCK_DEBUG__ */
+
+void spinlock_debug(spinlock_t *lock)
+{
+       uintptr_t pc = lock->call_site;
+       char *func_name;
+
+       if (!pc) {
+               printk("Lock %p: never locked\n", lock);
+               return;
+       }
+       func_name = get_fn_name(pc);
+       printk("Lock %p: last locked at [<%p>] in %s on core %d\n", lock, pc,
+              func_name, lock->calling_core);
+       kfree(func_name);
+}
+
+#endif /* CONFIG_SPINLOCK_DEBUG */
 
 /* Inits a hashlock. */
 void hashlock_init(struct hashlock *hl, unsigned int nr_entries)
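
The new includes at the top of the file exist for this helper: kdebug.h declares get_fn_name(), which (as the kfree() above implies) resolves a PC to a kmalloc'd function-name string, and kmalloc.h provides kfree(). A hedged usage sketch, assuming a CONFIG_SPINLOCK_DEBUG build and Akaros's SPINLOCK_INITIALIZER; the caller below is hypothetical:

static spinlock_t suspect_lock = SPINLOCK_INITIALIZER;

/* Hypothetical monitor-style helper: dump where suspect_lock was last
 * taken.  Under CONFIG_SPINLOCK_DEBUG, spin_lock() records call_site and
 * calling_core, so after one lock/unlock pair this prints something like:
 *   Lock 0xffff...: last locked at [<0xffff...>] in dump_suspect_lock on core 0 */
static void dump_suspect_lock(void)
{
	spin_lock(&suspect_lock);
	spin_unlock(&suspect_lock);
	spinlock_debug(&suspect_lock);
}

If the lock has never been taken, call_site is still 0 and spinlock_debug() prints the "never locked" line instead.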