Spinlock work
author Barret Rhoden <brho@cs.berkeley.edu>
Wed, 25 Mar 2009 23:06:12 +0000 (16:06 -0700)
committer Barret Rhoden <brho@cs.berkeley.edu>
Wed, 25 Mar 2009 23:06:12 +0000 (16:06 -0700)
Fixed spin_lock, added a templated spin_lock_irqsave, and protected vcprintf
with a lock (output now interleaves only at vcprintf granularity)
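
For context on the spin_lock fix below (a note added here, not part of the original
message): testb computes an AND, so testing against an immediate 0 always produces
zero and always sets ZF. That made both checks in the old loop no-ops: the "is it
free?" test always reported free, and the check of the xchg result never retried,
so the lock could be taken while another CPU still held it. cmpb really compares
the byte against 0:

	testb $0, %0	// (lock & 0) == 0 always, so ZF = 1 regardless of the lock byte
	cmpb  $0, %0	// (lock - 0), so ZF = 1 only when the lock byte really is 0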

inc/atomic.h
kern/printf.c

diff --git a/inc/atomic.h b/inc/atomic.h
index de60d1d..51e3766 100644
--- a/inc/atomic.h
+++ b/inc/atomic.h
@@ -13,6 +13,8 @@ typedef struct {uint32_t real_num;} atomic_t;
 
 static inline void spin_lock(volatile uint32_t* lock);
 static inline void spin_unlock(volatile uint32_t* lock);
+static inline void spin_lock_irqsave(volatile uint32_t* lock);
+static inline void spin_unlock_irqsave(volatile uint32_t* lock);
 static inline void atomic_inc(volatile uint32_t* number);
 static inline void atomic_dec(volatile uint32_t* number);
 
@@ -21,14 +23,14 @@ static inline void spin_lock(volatile uint32_t* lock)
 {
        asm volatile(
                        "spinlock:                "
-                       "       testb $0,%0;          "
+                       "       cmpb $0, %0;          "
                        "       je getlock;           "
                        "       pause;                "
                        "       jmp spinlock;         "
                        "getlock:                 " 
                        "       movb $1, %%al;        "
                        "       xchgb %%al, %0;       "
-                       "       test $0, %%al;        "
+                       "       cmpb $0, %%al;        "
                        "       jne spinlock;         "
                : : "m"(*lock) : "eax", "cc");
 }
@@ -38,7 +40,35 @@ static inline void spin_unlock(volatile uint32_t* lock)
        *lock = 0;
 }
 
-// need to do this with pointers and deref.  not totally sure why
+// TODO - and test by holding a lock in a while loop, then see if ints are off
+// and the other case, etc.
+// if ints are enabled, disable them and note it in the top bit of the lock
+static inline void spin_lock_irqsave(volatile uint32_t* lock)
+{
+       // doesn't actually do this yet
+       // probably want to push flags, cli, grab lock, examine flags, 
+       // and toggle the bit if interrupts were enabled
+       asm volatile(
+                       "spinlock:                "
+                       "       cmpb $0, %0;          "
+                       "       je getlock;           "
+                       "       pause;                "
+                       "       jmp spinlock;         "
+                       "getlock:                 " 
+                       "       movb $1, %%al;        "
+                       "       xchgb %%al, %0;       "
+                       "       cmpb $0, %%al;        "
+                       "       jne spinlock;         "
+               : : "m"(*lock) : "eax", "cc");
+}
+
+// if the top bit of the lock is set, then re-enable interrupts (TODO)
+static inline void spin_unlock_irqsave(volatile uint32_t* lock)
+{
+       *lock = 0;
+}
+
+// need to do this with pointers and deref.  %0 needs to be the memory address
 static inline void atomic_inc(volatile uint32_t* number)
 {
        asm volatile("lock incl %0" : "=m"(*number) : : "cc");
diff --git a/kern/printf.c b/kern/printf.c
index fef65ac..7b815c7 100644
--- a/kern/printf.c
+++ b/kern/printf.c
@@ -4,7 +4,9 @@
 #include <inc/types.h>
 #include <inc/stdio.h>
 #include <inc/stdarg.h>
+#include <inc/atomic.h>
 
+uint32_t output_lock = 0;
 
 static void
 putch(int ch, int *cnt)
@@ -18,7 +20,11 @@ vcprintf(const char *fmt, va_list ap)
 {
        int cnt = 0;
 
+       // lock all output.  this will catch any printfs at line granularity
+       spin_lock_irqsave(&output_lock);
        vprintfmt(putch, &cnt, fmt, ap);
+       spin_unlock_irqsave(&output_lock);
+
        return cnt;
 }
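
With the lock in place, each vcprintf call's characters reach the console as a unit;
calls from different cores can still interleave with each other, just never in the
middle of a single call. A hypothetical two-core illustration (cprintf here is the
usual varargs wrapper around vcprintf; the variable names are made up):

	// core 0
	cprintf("core 0: count = %d\n", count);

	// core 1, racing with core 0: this whole line prints before or after
	// core 0's line, never in the middle of it
	cprintf("core 1 is up\n");

	// output assembled from two separate calls can still be split, though:
	cprintf("step 1... ");
	cprintf("done\n");	// core 0's line may land between these two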