Spinlock checking is disabled by default
authorBarret Rhoden <brho@cs.berkeley.edu>
Wed, 4 Sep 2013 22:33:41 +0000 (15:33 -0700)
committerBarret Rhoden <brho@cs.berkeley.edu>
Wed, 4 Sep 2013 22:55:07 +0000 (15:55 -0700)
Reversing depth_disabled to checking_enabled.  Now, the extra debugging won't
get turned on until we're ready for it.

kern/arch/x86/trap.c
kern/arch/x86/trap32.c
kern/arch/x86/trap64.c
kern/include/smp.h
kern/src/atomic.c
kern/src/init.c
kern/src/smp.c

index bbc80c9..61f0d65 100644 (file)
@@ -213,10 +213,10 @@ static void handle_fperr(struct hw_trapframe *hw_tf)
 void backtrace_kframe(struct hw_trapframe *hw_tf)
 {
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
-       pcpui->__lock_depth_disabled++;
+       pcpui->__lock_checking_enabled--;
        printk("\nBacktrace of faulting kernel context on Core %d:\n", core_id());
        backtrace_frame(x86_get_hwtf_pc(hw_tf), x86_get_hwtf_fp(hw_tf));
-       pcpui->__lock_depth_disabled--;
+       pcpui->__lock_checking_enabled++;
 }
 
 /* Certain traps want IRQs enabled, such as the syscall.  Others can't handle
@@ -230,7 +230,7 @@ static void trap_dispatch(struct hw_trapframe *hw_tf)
                        /* Temporarily disable deadlock detection when we print.  We could
                         * deadlock if we were printing when we NMIed. */
                        pcpui = &per_cpu_info[core_id()];
-                       pcpui->__lock_depth_disabled++;
+                       pcpui->__lock_checking_enabled--;
                        /* This is a bit hacky, but we don't have a decent API yet */
                        extern bool mon_verbose_trace;
                        if (mon_verbose_trace) {
@@ -242,7 +242,7 @@ static void trap_dispatch(struct hw_trapframe *hw_tf)
                               fn_name);
                        kfree(fn_name);
                        print_kmsgs(core_id());
-                       pcpui->__lock_depth_disabled--;
+                       pcpui->__lock_checking_enabled++;
                        break;
                case T_BRKPT:
                        enable_irq();
@@ -252,7 +252,7 @@ static void trap_dispatch(struct hw_trapframe *hw_tf)
                {
                        uintptr_t ip = x86_get_ip_hw(hw_tf);
                        pcpui = &per_cpu_info[core_id()];
-                       pcpui->__lock_depth_disabled++;         /* for print debugging */
+                       pcpui->__lock_checking_enabled--;               /* for print debugging */
                        /* We will muck with the actual TF.  If we're dealing with
                         * userspace, we need to make sure we edit the actual TF that will
                         * get restarted (pcpui), and not the TF on the kstack (which aren't
@@ -268,12 +268,12 @@ static void trap_dispatch(struct hw_trapframe *hw_tf)
                            *(uint8_t*)(ip + 1) == 0x01, 
                            *(uint8_t*)(ip + 2) == 0xf9) {
                                x86_fake_rdtscp(hw_tf);
-                               pcpui->__lock_depth_disabled--; /* for print debugging */
+                               pcpui->__lock_checking_enabled++;       /* for print debugging */
                                return;
                        }
                        enable_irq();
                        monitor(hw_tf);
-                       pcpui->__lock_depth_disabled--;         /* for print debugging */
+                       pcpui->__lock_checking_enabled++;               /* for print debugging */
                        break;
                }
                case T_PGFLT:
index 3b5f358..3638833 100644 (file)
@@ -63,7 +63,7 @@ void print_trapframe(struct hw_trapframe *hw_tf)
        /* This is only called in debug scenarios, and often when the kernel trapped
         * and needs to tell us about it.  Disable the lock checker so it doesn't go
         * nuts when we print/panic */
-       pcpui->__lock_depth_disabled++;
+       pcpui->__lock_checking_enabled--;
        spin_lock_irqsave(&ptf_lock);
        printk("TRAP frame at %p on core %d\n", hw_tf, core_id());
        print_regs(&hw_tf->tf_regs);
@@ -83,7 +83,7 @@ void print_trapframe(struct hw_trapframe *hw_tf)
                printk("  ss   0x----%04x\n", hw_tf->tf_ss);
        }
        spin_unlock_irqsave(&ptf_lock);
-       pcpui->__lock_depth_disabled--;
+       pcpui->__lock_checking_enabled++;
 }
 
 void page_fault_handler(struct hw_trapframe *hw_tf)
index 9b4f927..c50a518 100644 (file)
@@ -50,7 +50,7 @@ void print_trapframe(struct hw_trapframe *hw_tf)
        /* This is only called in debug scenarios, and often when the kernel trapped
         * and needs to tell us about it.  Disable the lock checker so it doesn't go
         * nuts when we print/panic */
-       pcpui->__lock_depth_disabled++;
+       pcpui->__lock_checking_enabled--;
        spin_lock_irqsave(&ptf_lock);
        printk("HW TRAP frame at %p on core %d\n", hw_tf, core_id());
        printk("  rax  0x%016lx\n",           hw_tf->tf_rax);
@@ -83,7 +83,7 @@ void print_trapframe(struct hw_trapframe *hw_tf)
        printk("  rsp  0x%016lx\n",           hw_tf->tf_rsp);
        printk("  ss   0x------------%04x\n", hw_tf->tf_ss);
        spin_unlock_irqsave(&ptf_lock);
-       pcpui->__lock_depth_disabled--;
+       pcpui->__lock_checking_enabled++;
 
        /* Used in trapentry64.S */
        static_assert(offsetof(struct hw_trapframe, tf_cs) - 
@@ -96,7 +96,7 @@ void print_swtrapframe(struct sw_trapframe *sw_tf)
 {
        static spinlock_t ptf_lock = SPINLOCK_INITIALIZER_IRQSAVE;
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
-       pcpui->__lock_depth_disabled++;
+       pcpui->__lock_checking_enabled--;
        spin_lock_irqsave(&ptf_lock);
        printk("SW TRAP frame at %p on core %d\n", sw_tf, core_id());
        printk("  rbx  0x%016lx\n",           sw_tf->tf_rbx);
@@ -112,7 +112,7 @@ void print_swtrapframe(struct sw_trapframe *sw_tf)
        printk(" mxcsr 0x%08x\n",             sw_tf->tf_mxcsr);
        printk(" fpucw 0x%04x\n",             sw_tf->tf_fpucw);
        spin_unlock_irqsave(&ptf_lock);
-       pcpui->__lock_depth_disabled--;
+       pcpui->__lock_checking_enabled++;
 }
 
 void page_fault_handler(struct hw_trapframe *hw_tf)
index 7414184..5ca27b9 100644 (file)
@@ -36,7 +36,7 @@ struct per_cpu_info {
        struct user_context *cur_ctx;   /* user ctx we came in on (can be 0) */
        struct user_context actual_ctx; /* storage for cur_ctx */
        uint32_t __ctx_depth;           /* don't access directly.  see trap.h. */
-       int __lock_depth_disabled;      /* disables spinlock depth checking */
+       int __lock_checking_enabled;/* == 1, enables spinlock depth checking */
        struct syscall *cur_sysc;       /* ptr is into cur_proc's address space */
        struct kthread *spare;          /* useful when restarting */
        struct timer_chain tchain;      /* for the per-core alarm */
index 7582d7d..a413d24 100644 (file)
@@ -31,22 +31,23 @@ void spin_lock(spinlock_t *lock)
        uint32_t coreid = core_id_early();
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];
        /* Short circuit our lock checking, so we can print or do other things to
-        * announce the failure that require locks. */
-       if (pcpui->__lock_depth_disabled)
+        * announce the failure that require locks.  Also avoids anything else
+        * requiring pcpui initialization. */
+       if (pcpui->__lock_checking_enabled != 1)
                goto lock;
        if (lock->irq_okay) {
                if (!can_spinwait_irq(pcpui)) {
-                       pcpui->__lock_depth_disabled++;
+                       pcpui->__lock_checking_enabled--;
                        print_kctx_depths("IRQOK");
                        panic("Lock %p tried to spin when it shouldn't\n", lock);
-                       pcpui->__lock_depth_disabled--;
+                       pcpui->__lock_checking_enabled++;
                }
        } else {
                if (!can_spinwait_noirq(pcpui)) {
-                       pcpui->__lock_depth_disabled++;
+                       pcpui->__lock_checking_enabled--;
                        print_kctx_depths("NOIRQ");
                        panic("Lock %p tried to spin when it shouldn't\n", lock);
-                       pcpui->__lock_depth_disabled--;
+                       pcpui->__lock_checking_enabled++;
                }
        }
 lock:
index d410325..5a21549 100644 (file)
@@ -110,7 +110,7 @@ void _panic(const char *file, int line, const char *fmt,...)
        struct per_cpu_info *pcpui;
        /* We're panicing, possibly in a place that can't handle the lock checker */
        pcpui = &per_cpu_info[core_id_early()];
-       pcpui->__lock_depth_disabled++;
+       pcpui->__lock_checking_enabled--;
        va_start(ap, fmt);
        printk("kernel panic at %s:%d, from core %d: ", file, line,
               core_id_early());
@@ -123,7 +123,7 @@ dead:
        /* We could consider turning the lock checker back on here, but things are
         * probably a mess anyways, and with it on we would probably lock up right
         * away when we idle. */
-       //pcpui->__lock_depth_disabled--;
+       //pcpui->__lock_checking_enabled++;
        smp_idle();
 }
 
index 7b49b4e..39fc306 100644 (file)
@@ -112,6 +112,8 @@ void smp_percpu_init(void)
        assert(trace_buf);
        trace_ring_init(&pcpui->traces, trace_buf, PGSIZE,
                        sizeof(struct pcpu_trace_event));
+       /* Enable full lock debugging, after all pcpui work is done */
+       pcpui->__lock_checking_enabled = 1;
 }
 
 /* PCPUI Trace Rings: */