Spinlock irqsave usage checks
authorBarret Rhoden <brho@cs.berkeley.edu>
Thu, 15 Nov 2012 19:46:32 +0000 (11:46 -0800)
committerBarret Rhoden <brho@cs.berkeley.edu>
Wed, 21 Nov 2012 23:41:18 +0000 (15:41 -0800)
Controlled with CONFIG_SPINLOCK_DEBUG.  One big issue is that reporting a
deadlock usually requires taking locks itself - the print lock, kmsg
locks, etc.  We'll need a different mechanism to deal with this.

Here's a list of all irqsave locks.  A few of them weren't even
initialized.  Others probably don't need to be irqsave.  And a few are
from some really ancient code.  Some of these were only detected at
runtime by the usage checks.

Take a look at these and any of your other locks, and fix them up
accordingly.

 /* sync stuff */
 barrier->lock
 list->lock (checklists)

 /* page allocation / coloring */
 cache_colors_lock
 colored_page_free_list_lock (arch dependent btw)

 /* slab allocator */
 cp->cache_lock
 kmem_caches_lock

 /* riscv/sparc */
 fesvr_lock
 cas_lock (needs attention, might not compile)

 /* console printing and reading */
 lock (x86 console lock, cons_putc needs to be renamed)
 output_lock (serializes all printks)
 print_info_lock (testing.c)
 ptf_lock (x86 trapframe printing lock)
 readline_lock

 /* kmsg */
 per_cpu_info[i].immed_amsg_lock
 per_cpu_info[i].routine_amsg_lock

 /* misc */
 arsc_proc_lock
 systrace_lock (does this need irqsave?)
 ucq hash locks
 packet_buffers_lock

20 files changed:
kern/arch/i686/console.c
kern/arch/i686/nic_common.c
kern/arch/i686/page_alloc.c
kern/arch/i686/trap.c
kern/arch/riscv/atomic.c
kern/arch/riscv/console.c
kern/arch/riscv/page_alloc.c
kern/arch/sparc/atomic.h
kern/arch/sparc/page_alloc.c
kern/include/atomic.h
kern/src/arsc.c
kern/src/atomic.c
kern/src/colored_caches.c
kern/src/printf.c
kern/src/process.c
kern/src/readline.c
kern/src/slab.c
kern/src/smp.c
kern/src/syscall.c
kern/src/testing.c

index f6dea20..b8b2abc 100644 (file)
@@ -241,7 +241,7 @@ lpt_putc(int c)
 #define MAX_SCROLL_LENGTH      20
 #define SCROLLING_CRT_SIZE     (MAX_SCROLL_LENGTH * CRT_SIZE)
 
-static spinlock_t SRACY lock = SPINLOCK_INITIALIZER;
+static spinlock_t SRACY lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 static unsigned SREADONLY addr_6845;
 static uint16_t *SLOCKED(&lock) COUNT(CRT_SIZE) crt_buf;
index 7156e62..75115fe 100644 (file)
@@ -29,5 +29,5 @@ char* packet_buffers[MAX_PACKET_BUFFERS];
 uint32_t packet_buffers_sizes[MAX_PACKET_BUFFERS];
 uint32_t packet_buffers_head = 0;
 uint32_t packet_buffers_tail = 0;
-spinlock_t packet_buffers_lock = SPINLOCK_INITIALIZER;
+spinlock_t packet_buffers_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
index 9d29eaf..6039636 100644 (file)
@@ -15,7 +15,7 @@
 #include <pmap.h>
 #include <kmalloc.h>
 
-spinlock_t colored_page_free_list_lock;
+spinlock_t colored_page_free_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 page_list_t LCKD(&colored_page_free_list_lock) * CT(llc_cache->num_colors) RO
   colored_page_free_list = NULL;
index 7bb6a83..8cb59d9 100644 (file)
@@ -216,7 +216,7 @@ print_regs(push_regs_t *regs)
 void
 print_trapframe(trapframe_t *tf)
 {
-       static spinlock_t ptf_lock;
+       static spinlock_t ptf_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
        spin_lock_irqsave(&ptf_lock);
        printk("TRAP frame at %p on core %d\n", tf, core_id());
index ba5f307..025f8d9 100644 (file)
@@ -9,7 +9,9 @@ bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
                return 0;
        
   #define K 17
-       static spinlock_t cas_locks[K*HW_CACHE_ALIGN/sizeof(spinlock_t)];
+       /* TODO: not sure if this initialization works. */
+       static spinlock_t cas_locks[K*HW_CACHE_ALIGN/sizeof(spinlock_t)] =
+                         {SPINLOCK_INITIALIZER_IRQSAVE};
 
   uintptr_t bucket = (uintptr_t)addr / sizeof(uintptr_t) % K;
        spinlock_t* lock = &cas_locks[bucket*HW_CACHE_ALIGN/sizeof(spinlock_t)];
@@ -36,7 +38,9 @@ bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val, uint32_t new_val)
                return 0;
        
   #define K 17
-       static spinlock_t cas_locks[K*HW_CACHE_ALIGN/sizeof(spinlock_t)];
+       /* TODO: not sure if this initialization works. */
+       static spinlock_t cas_locks[K*HW_CACHE_ALIGN/sizeof(spinlock_t)] =
+                         {SPINLOCK_INITIALIZER_IRQSAVE};
 
   uintptr_t bucket = (uintptr_t)addr / sizeof(uintptr_t) % K;
        spinlock_t* lock = &cas_locks[bucket*HW_CACHE_ALIGN/sizeof(spinlock_t)];
index 1dbd5a4..184b820 100644 (file)
@@ -16,7 +16,7 @@ struct fesvr_syscall {
 };
 STAILQ_HEAD(fesvr_syscall_tailq, fesvr_syscall);
 
-spinlock_t fesvr_lock = SPINLOCK_INITIALIZER;
+spinlock_t fesvr_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 struct fesvr_syscall_tailq fesvr_queue;
 struct magic_mem fesvr_current __attribute__((aligned(64)));
 
index d109a4a..c98a857 100644 (file)
@@ -21,7 +21,7 @@
 #include <colored_caches.h>
 
 page_list_t *COUNT(llc_cache->num_colors) colored_page_free_list = NULL;
-spinlock_t colored_page_free_list_lock;
+spinlock_t colored_page_free_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 void page_alloc_bootstrap() {
        // Allocate space for the array required to manage the free lists
index 2961b9d..b55d037 100644 (file)
@@ -135,7 +135,7 @@ static inline bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
 {
        bool retval = 0;
        long temp;
-       static spinlock_t cas_lock = SPINLOCK_INITIALIZER;
+       static spinlock_t cas_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
        if ((long)*addr != exp_val)
                return 0;
index 843a782..89165a2 100644 (file)
@@ -21,7 +21,7 @@
 #include <colored_caches.h>
 
 page_list_t *COUNT(llc_cache->num_colors) colored_page_free_list = NULL;
-spinlock_t colored_page_free_list_lock;
+spinlock_t colored_page_free_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 void page_alloc_bootstrap() {
         // Allocate space for the array required to manage the free lists
index 76932c7..f724af9 100644 (file)
@@ -161,7 +161,8 @@ typedef struct checklist RACY checklist_t;
 #define ZEROS_ARRAY(size) {[0 ... ((size)-1)] 0}
 
 #define DEFAULT_CHECKLIST_MASK(sz) {(sz), ZEROS_ARRAY(BYTES_FOR_BITMASK(sz))}
-#define DEFAULT_CHECKLIST(sz) {SPINLOCK_INITIALIZER, DEFAULT_CHECKLIST_MASK(sz)}
+#define DEFAULT_CHECKLIST(sz) {SPINLOCK_INITIALIZER_IRQSAVE,                   \
+                               DEFAULT_CHECKLIST_MASK(sz)}
 #define INIT_CHECKLIST(nm, sz) \
        checklist_t nm = DEFAULT_CHECKLIST(sz);
 #define INIT_CHECKLIST_MASK(nm, sz)    \
index fd53427..732aced 100644 (file)
@@ -24,7 +24,7 @@
 
 
 struct proc_list arsc_proc_list = TAILQ_HEAD_INITIALIZER(arsc_proc_list);
-spinlock_t arsc_proc_lock = SPINLOCK_INITIALIZER;
+spinlock_t arsc_proc_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 intreg_t inline syscall_async(struct proc *p, syscall_req_t *call)
 {
index 557899e..5a3408d 100644 (file)
@@ -28,6 +28,20 @@ void spin_lock(spinlock_t *lock)
 {
 #ifdef __CONFIG_SPINLOCK_DEBUG__
        uint32_t coreid = core_id();
+       struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+       /* TODO: don't print directly.  If we have a lock on the print path that
+        * fails, we'll recurse and/or deadlock */
+       if (lock->irq_okay) {
+               if (!can_spinwait_irq(pcpui)) {
+                       print_kctx_depths("IRQOK");
+                       panic("Lock %08p tried to spin when it shouldn't\n", lock);
+               }
+       } else {
+               if (!can_spinwait_noirq(pcpui)) {
+                       print_kctx_depths("NOIRQ");
+                       panic("Lock %08p tried to spin when it shouldn't\n", lock);
+               }
+       }
        __spin_lock(lock);
        lock->call_site = get_caller_pc();
        lock->calling_core = coreid;
@@ -246,7 +260,7 @@ void down_checklist(checklist_t* list)
 /* Barriers */
 void init_barrier(barrier_t* barrier, uint32_t count)
 {
-       spinlock_init(&barrier->lock);
+       spinlock_init_irqsave(&barrier->lock);
        barrier->init_count = count;
        barrier->current_count = count;
        barrier->ready = 0;
index 571c9e9..8d0859a 100644 (file)
@@ -23,7 +23,7 @@
 #define l2 (available_caches.l2)
 #define l3 (available_caches.l3)
 
-spinlock_t cache_colors_lock;
+spinlock_t cache_colors_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 /************** Cache Related Functions  *****************/
 inline void init_cache_properties(cache_t *c, size_t sz_k, size_t wa, size_t clsz) {
index b858a0b..5254746 100644 (file)
@@ -12,7 +12,7 @@
 #include <stdio.h>
 #include <stdarg.h>
 
-spinlock_t output_lock = SPINLOCK_INITIALIZER;
+spinlock_t output_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 void putch(int ch, int **cnt)
 {
index f3883e8..dcdc0c9 100644 (file)
@@ -300,7 +300,7 @@ error_t proc_alloc(struct proc **pp, struct proc *parent)
        p->open_files.open_fds = (struct fd_set*)&p->open_files.open_fds_init;
        /* Init the ucq hash lock */
        p->ucq_hashlock = (struct hashlock*)&p->ucq_hl_noref;
-       hashlock_init(p->ucq_hashlock, HASHLOCK_DEFAULT_SZ);
+       hashlock_init_irqsave(p->ucq_hashlock, HASHLOCK_DEFAULT_SZ);
 
        atomic_inc(&num_envs);
        frontend_proc_init(p);
index 5fd756e..7c6c66d 100644 (file)
@@ -9,7 +9,7 @@
 
 int readline(char *buf, size_t buf_l, const char *prompt, ...)
 {
-       static spinlock_t readline_lock = SPINLOCK_INITIALIZER;
+       static spinlock_t readline_lock = SPINLOCK_INITIALIZER_IRQSAVE;
        int i, c, echoing, retval;
        va_list ap;
 
index ac1b8de..b7ab539 100644 (file)
@@ -39,7 +39,7 @@ static void __kmem_cache_create(struct kmem_cache *kc, const char *name,
 {
        assert(kc);
        assert(align);
-       spinlock_init(&kc->cache_lock);
+       spinlock_init_irqsave(&kc->cache_lock);
        kc->name = name;
        kc->obj_size = obj_size;
        kc->align = align;
@@ -70,7 +70,7 @@ static void __kmem_cache_create(struct kmem_cache *kc, const char *name,
 
 void kmem_cache_init(void)
 {
-       spinlock_init(&kmem_caches_lock);
+       spinlock_init_irqsave(&kmem_caches_lock);
        SLIST_INIT(&kmem_caches);
        /* We need to call the __ version directly to bootstrap the global
         * kmem_cache_cache. */
index fd16a42..a1b7308 100644 (file)
@@ -93,9 +93,9 @@ void smp_percpu_init(void)
        __arch_pcpu_init(coreid);
        per_cpu_info[coreid].spare = 0;
        /* Init relevant lists */
-       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
+       spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
-       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
+       spinlock_init_irqsave(&per_cpu_info[coreid].routine_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
        /* Initialize the per-core timer chain */
        init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
index 3c63eef..74e8ae2 100644 (file)
@@ -48,7 +48,7 @@ struct systrace_record *systrace_buffer = 0;
 uint32_t systrace_bufidx = 0;
 size_t systrace_bufsize = 0;
 struct proc *systrace_procs[MAX_NUM_TRACED] = {0};
-spinlock_t systrace_lock = SPINLOCK_INITIALIZER;
+spinlock_t systrace_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 /* Not enforcing the packing of systrace_procs yet, but don't rely on that */
 static bool proc_is_traced(struct proc *p)
index 01a6849..aeb7658 100644 (file)
@@ -586,7 +586,7 @@ void test_hello_world_handler(trapframe_t *tf, void* data)
                trapno, core_id(), tf);
 }
 
-spinlock_t print_info_lock = SPINLOCK_INITIALIZER;
+spinlock_t print_info_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 void test_print_info_handler(trapframe_t *tf, void* data)
 {