Set errno when only errstr was called
diff --git a/kern/src/atomic.c b/kern/src/atomic.c
index dac394c..f019e03 100644
--- a/kern/src/atomic.c
+++ b/kern/src/atomic.c
@@ -3,6 +3,7 @@
 #endif
 
 #include <arch/arch.h>
+#include <arch/kdebug.h>
 
 #include <bitmask.h>
 #include <atomic.h>
 #include <assert.h>
 #include <hashtable.h>
 #include <smp.h>
+#include <kmalloc.h>
+#include <kdebug.h>
 
-void increase_lock_depth(uint32_t coreid)
+static void increase_lock_depth(uint32_t coreid)
 {
        per_cpu_info[coreid].lock_depth++;
 }
 
-void decrease_lock_depth(uint32_t coreid)
+static void decrease_lock_depth(uint32_t coreid)
 {
        per_cpu_info[coreid].lock_depth--;
 }
 
+#ifdef CONFIG_SPINLOCK_DEBUG
+void spin_lock(spinlock_t *lock)
+{
+       uint32_t coreid = core_id_early();
+       struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+       /* Short circuit our lock checking, so we can print or do other things to
+        * announce the failure that require locks.  Also avoids anything else
+        * requiring pcpui initialization. */
+       if (pcpui->__lock_checking_enabled != 1)
+               goto lock;
+       if (lock->irq_okay) {
+               if (!can_spinwait_irq(pcpui)) {
+                       pcpui->__lock_checking_enabled--;
+                       print_kctx_depths("IRQOK");
+                       panic("Lock %p tried to spin when it shouldn't\n", lock);
+                       pcpui->__lock_checking_enabled++;
+               }
+       } else {
+               if (!can_spinwait_noirq(pcpui)) {
+                       pcpui->__lock_checking_enabled--;
+                       print_kctx_depths("NOIRQ");
+                       panic("Lock %p tried to spin when it shouldn't\n", lock);
+                       pcpui->__lock_checking_enabled++;
+               }
+       }
+       pcpui_trace_locks(pcpui, lock);
+lock:
+       __spin_lock(lock);
+       lock->call_site = get_caller_pc();
+       lock->calling_core = coreid;
+       /* TODO consider merging this with __ctx_depth (unused field) */
+       increase_lock_depth(lock->calling_core);
+       /* Memory barriers are handled by the particular arches */
+}
+
+void spin_unlock(spinlock_t *lock)
+{
+       decrease_lock_depth(lock->calling_core);
+       /* Memory barriers are handled by the particular arches */
+       __spin_unlock(lock);
+}
+
+void spinlock_debug(spinlock_t *lock)
+{
+       uintptr_t pc = lock->call_site;
+       char *func_name;
+
+       if (!pc) {
+               printk("Lock %p: never locked\n", lock);
+               return;
+       }
+       func_name = get_fn_name(pc);
+       printk("Lock %p: last locked at [<%p>] in %s on core %d\n", lock, pc,
+              func_name, lock->calling_core);
+       kfree(func_name);
+}
+
+#endif /* CONFIG_SPINLOCK_DEBUG */
+
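For reference, a minimal usage sketch (not part of the commit) of the CONFIG_SPINLOCK_DEBUG wrappers added above. The lock, counter, and function names are made up, and the non-irqsave spinlock_init() is assumed to be declared in <atomic.h> like the irqsave variant used elsewhere in this diff:

	/* Hypothetical caller of the debug wrappers above (illustrative only). */
	static spinlock_t example_lock;    /* made-up lock */
	static int example_counter;

	static void example_init(void)
	{
		spinlock_init(&example_lock);
	}

	static void example_update(void)
	{
		spin_lock(&example_lock);      /* records call_site and calling_core */
		example_counter++;
		spin_unlock(&example_lock);
	}

	static void example_report(void)
	{
		/* e.g. from a kernel monitor command: prints the last holder's call
		 * site ("Lock %p: last locked at [<pc>] in func on core N"). */
		spinlock_debug(&example_lock);
	}
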
 /* Inits a hashlock. */
 void hashlock_init(struct hashlock *hl, unsigned int nr_entries)
 {
@@ -33,6 +95,16 @@ void hashlock_init(struct hashlock *hl, unsigned int nr_entries)
        }
 }
 
+void hashlock_init_irqsave(struct hashlock *hl, unsigned int nr_entries)
+{
+       hl->nr_entries = nr_entries;
+       /* this is the right way to do it, though memset is faster.  If we ever
+        * find that this is taking a lot of time, we can change it. */
+       for (int i = 0; i < hl->nr_entries; i++) {
+               spinlock_init_irqsave(&hl->locks[i]);
+       }
+}
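A hedged sketch of how the new irqsave hashlock init might be used (also not part of the commit): the bucket count and names are made up, the kmalloc sizing assumes struct hashlock ends in a flexible array of spinlock_t, and hash_lock_irqsave()/hash_unlock_irqsave() are assumed to exist alongside the get_spinlock() helper below:

	#define EXAMPLE_NR_BUCKETS 32          /* made-up bucket count */

	static struct hashlock *example_hl;

	static void example_hl_setup(void)
	{
		/* Sizing assumes locks[] is a flexible member of struct hashlock;
		 * the kmalloc flags value here is illustrative. */
		example_hl = kmalloc(sizeof(struct hashlock) +
		                     EXAMPLE_NR_BUCKETS * sizeof(spinlock_t), 0);
		hashlock_init_irqsave(example_hl, EXAMPLE_NR_BUCKETS);
	}

	static void example_touch(long key)
	{
		/* Assumed counterparts to get_spinlock(): take only the bucket that
		 * 'key' hashes to, with IRQs saved/disabled. */
		hash_lock_irqsave(example_hl, key);
		/* ... update the state hashed under 'key' ... */
		hash_unlock_irqsave(example_hl, key);
	}
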
+
 /* Helper, gets the specific spinlock for a hl/key combo. */
 static spinlock_t *get_spinlock(struct hashlock *hl, long key)
 {
@@ -207,7 +279,7 @@ void down_checklist(checklist_t* list)
 /* Barriers */
 void init_barrier(barrier_t* barrier, uint32_t count)
 {
-       spinlock_init(&barrier->lock);
+       spinlock_init_irqsave(&barrier->lock);
        barrier->init_count = count;
        barrier->current_count = count;
        barrier->ready = 0;
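
Finally, a sketch of the barrier whose lock now gets the irqsave init. waiton_barrier() and the per-core calling pattern are assumptions about the rest of this file and the smp code, not shown in this diff; the irqsave init presumably keeps the new spin_lock() checks satisfied if a core reaches the barrier with IRQs disabled.

	static barrier_t example_barrier;      /* made-up barrier */

	static void example_setup(uint32_t nr_cores)
	{
		/* Counted down once per participating core. */
		init_barrier(&example_barrier, nr_cores);
	}

	static void example_percore_work(void)
	{
		/* ... do this core's share of the work ... */
		waiton_barrier(&example_barrier);  /* assumed API: spin until all
		                                    * nr_cores cores have arrived */
	}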