Implement poll() on top of select()
[akaros.git] / user / pthread / pthread.c
index 0d0612a..9a6691e 100644
@@ -1,28 +1,30 @@
-#include <ros/arch/trapframe.h>
-#include <pthread.h>
-#include <vcore.h>
-#include <mcs.h>
+#include <ros/trapframe.h>
+#include "pthread.h"
+#include <parlib/vcore.h>
+#include <parlib/mcs.h>
 #include <stdlib.h>
 #include <string.h>
-#include <assert.h>
+#include <parlib/assert.h>
 #include <stdio.h>
 #include <errno.h>
-#include <parlib.h>
+#include <parlib/parlib.h>
 #include <ros/event.h>
-#include <arch/atomic.h>
-#include <arch/arch.h>
+#include <parlib/arch/atomic.h>
+#include <parlib/arch/arch.h>
 #include <sys/queue.h>
 #include <sys/mman.h>
-#include <assert.h>
-#include <event.h>
-#include <ucq.h>
+#include <parlib/event.h>
+#include <parlib/ucq.h>
+#include <parlib/signal.h>
+#include <parlib/arch/trap.h>
 
 struct pthread_queue ready_queue = TAILQ_HEAD_INITIALIZER(ready_queue);
 struct pthread_queue active_queue = TAILQ_HEAD_INITIALIZER(active_queue);
 struct mcs_pdr_lock queue_lock;
-pthread_once_t init_once = PTHREAD_ONCE_INIT;
 int threads_ready = 0;
 int threads_active = 0;
+atomic_t threads_total;
+bool need_tls = TRUE;
 
 /* Array of per-vcore structs to manage waiting on syscalls and handling
  * overflow.  Init'd in pth_init(). */
@@ -31,43 +33,46 @@ struct sysc_mgmt *sysc_mgmt = 0;
 /* Helper / local functions */
 static int get_next_pid(void);
 static inline void spin_to_sleep(unsigned int spins, unsigned int *spun);
+static inline void pthread_exit_no_cleanup(void *ret);
 
 /* Pthread 2LS operations */
-void pth_sched_entry(void);
-void pth_thread_runnable(struct uthread *uthread);
-void pth_thread_paused(struct uthread *uthread);
-void pth_thread_blockon_sysc(struct uthread *uthread, void *sysc);
-void pth_thread_has_blocked(struct uthread *uthread, int flags);
-void pth_preempt_pending(void);
-void pth_spawn_thread(uintptr_t pc_start, void *data);
+static void pth_sched_entry(void);
+static void pth_thread_runnable(struct uthread *uthread);
+static void pth_thread_paused(struct uthread *uthread);
+static void pth_thread_blockon_sysc(struct uthread *uthread, void *sysc);
+static void pth_thread_has_blocked(struct uthread *uthread, int flags);
+static void pth_thread_refl_fault(struct uthread *uth,
+                                  struct user_context *ctx);
 
 /* Event Handlers */
-static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type);
+static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
+                               void *data);
 
 struct schedule_ops pthread_sched_ops = {
-       pth_sched_entry,
-       pth_thread_runnable,
-       pth_thread_paused,
-       pth_thread_blockon_sysc,
-       pth_thread_has_blocked,
-       0, /* pth_preempt_pending, */
-       0, /* pth_spawn_thread, */
+       .sched_entry = pth_sched_entry,
+       .thread_runnable = pth_thread_runnable,
+       .thread_paused = pth_thread_paused,
+       .thread_blockon_sysc = pth_thread_blockon_sysc,
+       .thread_has_blocked = pth_thread_has_blocked,
+       .thread_refl_fault = pth_thread_refl_fault,
 };
 
-/* Publish our sched_ops, overriding the weak defaults */
-struct schedule_ops *sched_ops = &pthread_sched_ops;
-
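
With the weak-symbol publishing above removed, the ops table is now handed to
the uthread library explicitly during init (see pthread_lib_init() further
down):

	/* registration moved into pthread_lib_init(): */
	uthread_2ls_init((struct uthread*)t, &pthread_sched_ops);
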
 /* Static helpers */
 static void __pthread_free_stack(struct pthread_tcb *pt);
 static int __pthread_allocate_stack(struct pthread_tcb *pt);
+static void __pth_yield_cb(struct uthread *uthread, void *junk);
 
 /* Called from vcore entry.  Options usually include restarting whoever was
  * running there before or running a new thread.  Events are handled out of
  * event.c (table of function pointers, stuff like that). */
-void __attribute__((noreturn)) pth_sched_entry(void)
+static void __attribute__((noreturn)) pth_sched_entry(void)
 {
        uint32_t vcoreid = vcore_id();
        if (current_uthread) {
+               /* Prep the pthread to run any pending posix signal handlers registered
+                * via pthread_kill once it is restored. */
+               uthread_prep_pending_signals(current_uthread);
+               /* Run the thread itself */
                run_current_uthread();
                assert(0);
        }
@@ -82,8 +87,10 @@ void __attribute__((noreturn)) pth_sched_entry(void)
                mcs_pdr_lock(&queue_lock);
                new_thread = TAILQ_FIRST(&ready_queue);
                if (new_thread) {
-                       TAILQ_REMOVE(&ready_queue, new_thread, next);
-                       TAILQ_INSERT_TAIL(&active_queue, new_thread, next);
+                       TAILQ_REMOVE(&ready_queue, new_thread, tq_next);
+                       assert(new_thread->state == PTH_RUNNABLE);
+                       new_thread->state = PTH_RUNNING;
+                       TAILQ_INSERT_TAIL(&active_queue, new_thread, tq_next);
                        threads_active++;
                        threads_ready--;
                        mcs_pdr_unlock(&queue_lock);
@@ -99,10 +106,13 @@ void __attribute__((noreturn)) pth_sched_entry(void)
                /* no new thread, try to yield */
                printd("[P] No threads, vcore %d is yielding\n", vcore_id());
                /* TODO: you can imagine having something smarter here, like spin for a
-                * bit before yielding (or not at all if you want to be greedy). */
+                * bit before yielding. */
                vcore_yield(FALSE);
        } while (1);
-       assert(new_thread->state == PTH_RUNNABLE);
+       /* Prep the pthread to run any pending posix signal handlers registered
+        * via pthread_kill once it is restored. */
+       uthread_prep_pending_signals((struct uthread*)new_thread);
+       /* Run the thread itself */
        run_uthread((struct uthread*)new_thread);
        assert(0);
 }
@@ -111,10 +121,12 @@ void __attribute__((noreturn)) pth_sched_entry(void)
 static void __pthread_run(void)
 {
        struct pthread_tcb *me = pthread_self();
-       pthread_exit(me->start_routine(me->arg));
+       pthread_exit_no_cleanup(me->start_routine(me->arg));
 }
 
-void pth_thread_runnable(struct uthread *uthread)
+/* GIANT WARNING: if you make any changes to this, also change the broadcast
+ * wakeups (cond var, barrier, etc) */
+static void pth_thread_runnable(struct uthread *uthread)
 {
        struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
        /* At this point, the 2LS can see why the thread blocked and was woken up in
@@ -127,78 +139,24 @@ void pth_thread_runnable(struct uthread *uthread)
                case (PTH_BLK_YIELDING):
                case (PTH_BLK_JOINING):
                case (PTH_BLK_SYSC):
+               case (PTH_BLK_PAUSED):
                case (PTH_BLK_MUTEX):
                        /* can do whatever for each of these cases */
                        break;
                default:
-                       printf("Odd state %d for pthread %08p\n", pthread->state, pthread);
+                       panic("Odd state %d for pthread %08p\n", pthread->state, pthread);
        }
        pthread->state = PTH_RUNNABLE;
        /* Insert the newly created thread into the ready queue of threads.
         * It will be removed from this queue later when vcore_entry() comes up */
        mcs_pdr_lock(&queue_lock);
-       TAILQ_INSERT_TAIL(&ready_queue, pthread, next);
+       /* Again, GIANT WARNING: if you change this, change batch wakeup code */
+       TAILQ_INSERT_TAIL(&ready_queue, pthread, tq_next);
        threads_ready++;
        mcs_pdr_unlock(&queue_lock);
        /* Smarter schedulers should look at the num_vcores() and how much work is
         * going on to make a decision about how many vcores to request. */
-       vcore_request(threads_ready);
-}
-
-/* The calling thread is yielding.  Do what you need to do to restart (like put
- * yourself on a runqueue), or do some accounting.  Eventually, this might be a
- * little more generic than just yield. */
-/* TODO: keeping this around temporarily */
-static void pth_thread_yield(struct uthread *uthread, void *junk)
-{
-       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
-       struct pthread_tcb *temp_pth = 0;       /* used for exiting AND joining */
-       /* Remove from the active list, whether exiting or yielding. */
-       mcs_pdr_lock(&queue_lock);
-       threads_active--;
-       TAILQ_REMOVE(&active_queue, pthread, next);
-       mcs_pdr_unlock(&queue_lock);
-       if (pthread->state == PTH_EXITING) {
-               /* Destroy the pthread */
-               uthread_cleanup(uthread);
-               /* Cleanup, mirroring pthread_create() */
-               __pthread_free_stack(pthread);
-               /* TODO: race on detach state */
-               if (pthread->detached) {
-                       free(pthread);
-               } else {
-                       /* See if someone is joining on us.  If not, we're done (and the
-                        * joiner will wake itself when it saw us there instead of 0). */
-                       temp_pth = atomic_swap_ptr((void**)&pthread->joiner, pthread);
-                       if (temp_pth) {
-                               /* they joined before we exited, we need to wake them */
-                               printd("[pth] %08p exiting, waking joiner %08p\n",
-                                      pthread, temp_pth);
-                               uthread_runnable((struct uthread*)temp_pth);
-                       }
-               }
-       } else if (pthread->state == PTH_BLK_JOINING) {
-               /* We're trying to join, yield til we get woken up */
-               /* put ourselves in the join target's joiner slot.  If we get anything
-                * back, we lost the race and need to wake ourselves. */
-               temp_pth = atomic_swap_ptr((void**)&pthread->join_target->joiner,
-                                          pthread);
-               /* after that atomic swap, the pthread might be woken up (if it
-                * succeeded), so don't touch pthread again after that (this following
-                * if () is okay). */
-               if (temp_pth) {
-                       assert(temp_pth == pthread->join_target);       /* Sanity */
-                       /* wake ourselves, not the exited one! */
-                       printd("[pth] %08p already exit, rewaking ourselves, joiner %08p\n",
-                              temp_pth, pthread);
-                       uthread_runnable((struct uthread*)pthread);
-               }
-       } else {
-               assert(pthread->state == PTH_BLK_YIELDING);
-               /* Yielding for no apparent reason (being nice / help break deadlocks).
-                * Just wake it up and make it ready again. */
-               uthread_runnable((struct uthread*)pthread);
-       }
+       vcore_request_more(threads_ready);
 }
 
 /* For some reason not under its control, the uthread stopped running (compared
@@ -211,20 +169,17 @@ static void pth_thread_yield(struct uthread *uthread, void *junk)
  * vcore that is losing the uthread.  If that vcore is running, it'll be in a
  * preempt-event handling loop (not in your 2LS code).  If this is a big
  * problem, I'll change it. */
-void pth_thread_paused(struct uthread *uthread)
+static void pth_thread_paused(struct uthread *uthread)
 {
        struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
-       /* Remove from the active list.  Note that I don't particularly care about
-        * the active list.  We keep it around because it causes bugs and keeps us
-        * honest.  After all, some 2LS may want an active list */
-       mcs_pdr_lock(&queue_lock);
-       threads_active--;
-       TAILQ_REMOVE(&active_queue, pthread, next);
-       mcs_pdr_unlock(&queue_lock);
+
+       __pthread_generic_yield(pthread);
+       /* communicate to pth_thread_runnable */
+       pthread->state = PTH_BLK_PAUSED;
        /* At this point, you could do something clever, like put it at the front of
         * the runqueue, see if it was holding a lock, do some accounting, or
         * whatever. */
-       uthread_runnable(uthread);
+       pth_thread_runnable(uthread);
 }
 
 /* Restarts a uthread hanging off a syscall.  For the simple pthread case, we
@@ -237,12 +192,13 @@ static void restart_thread(struct syscall *sysc)
        assert(((struct pthread_tcb*)ut_restartee)->state == PTH_BLK_SYSC);
        assert(ut_restartee->sysc == sysc);     /* set in uthread.c */
        ut_restartee->sysc = 0; /* so we don't 'reblock' on this later */
-       uthread_runnable(ut_restartee);
+       pth_thread_runnable(ut_restartee);
 }
 
 /* This handler is usually run in vcore context, though I can imagine it being
  * called by a uthread in some other threading library. */
-static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type)
+static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
+                               void *data)
 {
        struct syscall *sysc;
        assert(in_vcore_context());
@@ -266,19 +222,15 @@ static void pth_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type)
 * and is trying to block on sysc.  Need to put it somewhere where we can wake it
  * up when the sysc is done.  For now, we'll have the kernel send us an event
  * when the syscall is done. */
-void pth_thread_blockon_sysc(struct uthread *uthread, void *syscall)
+static void pth_thread_blockon_sysc(struct uthread *uthread, void *syscall)
 {
        struct syscall *sysc = (struct syscall*)syscall;
        int old_flags;
-       bool need_to_restart = FALSE;
        uint32_t vcoreid = vcore_id();
-       /* rip from the active queue */
        struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+
+       __pthread_generic_yield(pthread);
        pthread->state = PTH_BLK_SYSC;
-       mcs_pdr_lock(&queue_lock);
-       threads_active--;
-       TAILQ_REMOVE(&active_queue, pthread, next);
-       mcs_pdr_unlock(&queue_lock);
        /* Set things up so we can wake this thread up later */
        sysc->u_data = uthread;
        /* Register our vcore's syscall ev_q to hear about this syscall. */
@@ -290,9 +242,11 @@ void pth_thread_blockon_sysc(struct uthread *uthread, void *syscall)
        /* GIANT WARNING: do not touch the thread after this point. */
 }
 
-void pth_thread_has_blocked(struct uthread *uthread, int flags)
+static void pth_thread_has_blocked(struct uthread *uthread, int flags)
 {
        struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+
+       __pthread_generic_yield(pthread);
        /* could imagine doing something with the flags.  For now, we just treat all
         * externally blocked reasons as 'MUTEX'.  Whatever we do here, we are
         * mostly communicating to our future selves in pth_thread_runnable(), which
@@ -303,20 +257,99 @@ void pth_thread_has_blocked(struct uthread *uthread, int flags)
                printf("For great justice!\n");
 }
 
-void pth_preempt_pending(void)
+static void __signal_and_restart(struct uthread *uthread,
+                                 int signo, int code, void *addr)
+{
+       uthread_prep_signal_from_fault(uthread, signo, code, addr);
+       pth_thread_runnable(uthread);
+}
+
+static void handle_div_by_zero(struct uthread *uthread, unsigned int err,
+                               unsigned long aux)
+{
+       __signal_and_restart(uthread, SIGFPE, FPE_INTDIV, (void*)aux);
+}
+
+static void handle_gp_fault(struct uthread *uthread, unsigned int err,
+                            unsigned long aux)
 {
+       __signal_and_restart(uthread, SIGSEGV, SEGV_ACCERR, (void*)aux);
 }
 
-void pth_spawn_thread(uintptr_t pc_start, void *data)
+static void handle_page_fault(struct uthread *uthread, unsigned int err,
+                              unsigned long aux)
 {
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       if (!(err & PF_VMR_BACKED)) {
+               __signal_and_restart(uthread, SIGSEGV, SEGV_MAPERR, (void*)aux);
+       } else {
+               syscall_async(&uthread->local_sysc, SYS_populate_va, aux, 1);
+               __block_uthread_on_async_sysc(uthread);
+       }
+}
+
+static void pth_thread_refl_hw_fault(struct uthread *uthread,
+                                     unsigned int trap_nr,
+                                     unsigned int err, unsigned long aux)
+{
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+
+       __pthread_generic_yield(pthread);
+       pthread->state = PTH_BLK_SYSC;
+
+       switch (trap_nr) {
+       case HW_TRAP_DIV_ZERO:
+               handle_div_by_zero(uthread, err, aux);
+               break;
+       case HW_TRAP_GP_FAULT:
+               handle_gp_fault(uthread, err, aux);
+               break;
+       case HW_TRAP_PAGE_FAULT:
+               handle_page_fault(uthread, err, aux);
+               break;
+       default:
+               printf("Pthread has unhandled fault: %d, err: %d, aux: %p\n",
+                      trap_nr, err, aux);
+               /* Note that uthread.c already copied out our ctx into the uth
+                * struct */
+               print_user_context(&uthread->u_ctx);
+               printf("Turn on printx to spew unhandled, malignant trap info\n");
+               exit(-1);
+       }
+}
+
+static void pth_thread_refl_fault(struct uthread *uth,
+                                  struct user_context *ctx)
+{
+       switch (ctx->type) {
+       case ROS_HW_CTX:
+               pth_thread_refl_hw_fault(uth, __arch_refl_get_nr(ctx),
+                                        __arch_refl_get_err(ctx),
+                                        __arch_refl_get_aux(ctx));
+               break;
+       default:
+               assert(0);
+       }
+}
+
+/* Akaros pthread extensions / hacks */
+
+void pthread_need_tls(bool need)
+{
+       need_tls = need;
 }
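
A short usage sketch of this extension.  Note that need_tls is a process-wide
global, so the call affects every subsequent pthread_create() (worker() is a
hypothetical thread body):

	extern void *worker(void *arg);	/* hypothetical */

	void spawn_light_worker(void)
	{
		pthread_t tid;

		/* Skip per-thread TLS setup for all threads created from here
		 * on; only safe if those threads never rely on TLS. */
		pthread_need_tls(FALSE);
		pthread_create(&tid, NULL, worker, NULL);
	}
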
 
 /* Pthread interface stuff and helpers */
 
 int pthread_attr_init(pthread_attr_t *a)
 {
+       a->stackaddr = 0;
        a->stacksize = PTHREAD_STACK_SIZE;
        a->detachstate = PTHREAD_CREATE_JOINABLE;
+       /* priority and policy should be set by anyone changing inherit. */
+       a->sched_priority = 0;
+       a->sched_policy = 0;
+       a->sched_inherit = PTHREAD_INHERIT_SCHED;
        return 0;
 }
 
@@ -327,18 +360,23 @@ int pthread_attr_destroy(pthread_attr_t *a)
 
 static void __pthread_free_stack(struct pthread_tcb *pt)
 {
-       assert(!munmap(pt->stacktop - pt->stacksize, pt->stacksize));
+       int ret = munmap(pt->stacktop - pt->stacksize, pt->stacksize);
+       assert(!ret);
 }
 
 static int __pthread_allocate_stack(struct pthread_tcb *pt)
 {
+       int force_a_page_fault;
        assert(pt->stacksize);
        void* stackbot = mmap(0, pt->stacksize,
                              PROT_READ|PROT_WRITE|PROT_EXEC,
-                             MAP_POPULATE|MAP_ANONYMOUS, -1, 0);
+                             MAP_ANONYMOUS, -1, 0);
        if (stackbot == MAP_FAILED)
                return -1; // errno set by mmap
        pt->stacktop = stackbot + pt->stacksize;
+       /* Want the top of the stack populated, but not the rest of the stack;
+        * that'll grow on demand (up to pt->stacksize) */
+       force_a_page_fault = ACCESS_ONCE(*(int*)(pt->stacktop - sizeof(int)));
        return 0;
 }
 
@@ -354,38 +392,77 @@ int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
        attr->stacksize = stacksize;
        return 0;
 }
+
 int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
 {
        *stacksize = attr->stacksize;
        return 0;
 }
 
-/* Do whatever init you want.  At some point call uthread_lib_init() and pass it
+int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
+{
+       attr->guardsize = guardsize;
+       return 0;
+}
+
+int pthread_attr_getguardsize(pthread_attr_t *attr, size_t *guardsize)
+{
+       *guardsize = attr->guardsize;
+       return 0;
+}
+
+int pthread_attr_getstack(const pthread_attr_t *__restrict __attr,
+                          void **__stackaddr, size_t *__stacksize)
+{
+       *__stackaddr = __attr->stackaddr;
+       *__stacksize = __attr->stacksize;
+       return 0;
+}
+
+int pthread_getattr_np(pthread_t __th, pthread_attr_t *__attr)
+{
+       __attr->stackaddr = __th->stacktop - __th->stacksize;
+       __attr->stacksize = __th->stacksize;
+       if (__th->detached)
+               __attr->detachstate = PTHREAD_CREATE_DETACHED;
+       else
+               __attr->detachstate = PTHREAD_CREATE_JOINABLE;
+       return 0;
+}
+
+/* Do whatever init you want.  At some point call uthread_2ls_init() and pass it
  * a uthread representing thread0 (int main()) */
-static int pthread_lib_init(void)
+void __attribute__((constructor)) pthread_lib_init(void)
 {
-       /* Make sure this only runs once */
-       static bool initialized = FALSE;
-       if (initialized)
-               return -1;
-       initialized = TRUE;
        uintptr_t mmap_block;
+       struct pthread_tcb *t;
+       int ret;
+
+       /* Only run once, but make sure that uthread_lib_init() has run already. */
+       init_once_racy(return);
+       uthread_lib_init();
+
        mcs_pdr_init(&queue_lock);
        /* Create a pthread_tcb for the main thread */
-       pthread_t t = (pthread_t)calloc(1, sizeof(struct pthread_tcb));
-       assert(t);
+       ret = posix_memalign((void**)&t, __alignof__(struct pthread_tcb),
+                            sizeof(struct pthread_tcb));
+       assert(!ret);
+       memset(t, 0, sizeof(struct pthread_tcb));       /* aggressively 0 for bugs */
        t->id = get_next_pid();
        t->stacksize = USTACK_NUM_PAGES * PGSIZE;
        t->stacktop = (void*)USTACKTOP;
        t->detached = TRUE;
        t->state = PTH_RUNNING;
-       t->join_target = 0;
        t->joiner = 0;
+       /* implies that sigmasks are longs, which they are. */
        assert(t->id == 0);
+       t->sched_policy = SCHED_FIFO;
+       t->sched_priority = 0;
+       SLIST_INIT(&t->cr_stack);
        /* Put the new pthread (thread0) on the active queue */
-       mcs_pdr_lock(&queue_lock);      /* arguably, we don't need these (_S mode) */
+       mcs_pdr_lock(&queue_lock);
        threads_active++;
-       TAILQ_INSERT_TAIL(&active_queue, t, next);
+       TAILQ_INSERT_TAIL(&active_queue, t, tq_next);
        mcs_pdr_unlock(&queue_lock);
        /* Tell the kernel where and how we want to receive events.  This is just an
         * example of what to do to have a notification turned on.  We're turning on
@@ -396,7 +473,7 @@ static int pthread_lib_init(void)
        enable_kevent(EV_USER_IPI, 0, EVENT_IPI | EVENT_VCORE_PRIVATE);
 
        /* Handle syscall events. */
-       ev_handlers[EV_SYSCALL] = pth_handle_syscall;
+       register_ev_handler(EV_SYSCALL, pth_handle_syscall, 0);
        /* Set up the per-vcore structs to track outstanding syscalls */
        sysc_mgmt = malloc(sizeof(struct sysc_mgmt) * max_vcores());
        assert(sysc_mgmt);
@@ -410,10 +487,12 @@ static int pthread_lib_init(void)
         * max_vcores()). */
        for (int i = 0; i < max_vcores(); i++) {
                /* Each vcore needs to point to a non-VCPD ev_q */
-               sysc_mgmt[i].ev_q = get_big_event_q_raw();
-               sysc_mgmt[i].ev_q->ev_flags = EVENT_IPI | EVENT_INDIR | EVENT_FALLBACK;
+               sysc_mgmt[i].ev_q = get_eventq_raw();
+               sysc_mgmt[i].ev_q->ev_flags = EVENT_IPI | EVENT_INDIR |
+                                             EVENT_SPAM_INDIR | EVENT_WAKEUP;
                sysc_mgmt[i].ev_q->ev_vcore = i;
-               ucq_init_raw(&sysc_mgmt[i].ev_q->ev_mbox->ev_msgs, 
+               sysc_mgmt[i].ev_q->ev_mbox->type = EV_MBOX_UCQ;
+               ucq_init_raw(&sysc_mgmt[i].ev_q->ev_mbox->ucq,
                             mmap_block + (2 * i    ) * PGSIZE, 
                             mmap_block + (2 * i + 1) * PGSIZE); 
        }
@@ -428,47 +507,70 @@ static int pthread_lib_init(void)
        assert(sysc_mbox);
        assert(two_pages);
        memset(sysc_mbox, 0, sizeof(struct event_mbox));
-       ucq_init_raw(&sysc_mbox->ev_msgs, two_pages, two_pages + PGSIZE);
+       sysc_mbox->type = EV_MBOX_UCQ;
+       ucq_init_raw(&sysc_mbox->ucq, two_pages, two_pages + PGSIZE);
        for (int i = 0; i < max_vcores(); i++) {
-               sysc_mgmt[i].ev_q = get_event_q();
-               sysc_mgmt[i].ev_q->ev_flags = EVENT_IPI | EVENT_INDIR | EVENT_FALLBACK;
+               sysc_mgmt[i].ev_q = get_eventq_slim();
+               sysc_mgmt[i].ev_q->ev_flags = EVENT_IPI | EVENT_INDIR |
+                                             EVENT_SPAM_INDIR | EVENT_WAKEUP;
                sysc_mgmt[i].ev_q->ev_vcore = i;
                sysc_mgmt[i].ev_q->ev_mbox = sysc_mbox;
        }
 #endif
-       /* Initialize the uthread code (we're in _M mode after this).  Doing this
-        * last so that all the event stuff is ready when we're in _M mode.  Not a
-        * big deal one way or the other.  Note that vcore_init() hasn't happened
-        * yet, so if a 2LS somehow wants to have its init stuff use things like
-        * vcore stacks or TLSs, we'll need to change this. */
-       assert(!uthread_lib_init((struct uthread*)t));
-       return 0;
+       /* Sched ops is set by 2ls_init */
+       uthread_2ls_init((struct uthread*)t, &pthread_sched_ops);
+       atomic_init(&threads_total, 1);                 /* one for thread0 */
 }
 
-int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
-                   void *(*start_routine)(void *), void *arg)
+/* Make sure our scheduler runs inside an MCP rather than an SCP. */
+void pthread_mcp_init()
 {
-       static bool first = TRUE;
-       if (first) {
-               assert(!pthread_lib_init());
-               first = FALSE;
-       }
-       /* Create the actual thread */
+       /* Prevent this from happening more than once. */
+       init_once_racy(return);
+
+       uthread_mcp_init();
+       /* From here forward we are an MCP running on vcore 0. Could consider doing
+        * other pthread specific initialization based on knowing we are an mcp
+        * after this point. */
+}
+
+int __pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+                     void *(*start_routine)(void *), void *arg)
+{
+       struct uth_thread_attr uth_attr = {0};
+       struct pthread_tcb *parent;
        struct pthread_tcb *pthread;
-       pthread = (pthread_t)calloc(1, sizeof(struct pthread_tcb));
-       assert(pthread);
+       int ret;
+
+       /* For now, unconditionally become an mcp when creating a pthread (if not
+        * one already). This may change in the future once we support 2LSs in an
+        * SCP. */
+       pthread_mcp_init();
+
+       parent = (struct pthread_tcb*)current_uthread;
+       ret = posix_memalign((void**)&pthread, __alignof__(struct pthread_tcb),
+                            sizeof(struct pthread_tcb));
+       assert(!ret);
+       memset(pthread, 0, sizeof(struct pthread_tcb)); /* aggressively 0 for bugs*/
        pthread->stacksize = PTHREAD_STACK_SIZE;        /* default */
        pthread->state = PTH_CREATED;
        pthread->id = get_next_pid();
        pthread->detached = FALSE;                              /* default */
-       pthread->join_target = 0;
        pthread->joiner = 0;
+       /* Might override these later, based on attr && EXPLICIT_SCHED */
+       pthread->sched_policy = parent->sched_policy;
+       pthread->sched_priority = parent->sched_priority;
+       SLIST_INIT(&pthread->cr_stack);
        /* Respect the attributes */
        if (attr) {
                if (attr->stacksize)                                    /* don't set a 0 stacksize */
                        pthread->stacksize = attr->stacksize;
                if (attr->detachstate == PTHREAD_CREATE_DETACHED)
                        pthread->detached = TRUE;
+               if (attr->sched_inherit == PTHREAD_EXPLICIT_SCHED) {
+                       pthread->sched_policy = attr->sched_policy;
+                       pthread->sched_priority = attr->sched_priority;
+               }
        }
        /* allocate a stack */
        if (__pthread_allocate_stack(pthread))
@@ -476,52 +578,189 @@ int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
        /* Set the u_tf to start up in __pthread_run, which will call the real
         * start_routine and pass it the arg.  Note those aren't set until later in
         * pthread_create(). */
-       init_user_tf(&pthread->uthread.utf, (long)&__pthread_run,
-                    (long)(pthread->stacktop));
+       init_user_ctx(&pthread->uthread.u_ctx, (uintptr_t)&__pthread_run,
+                     (uintptr_t)(pthread->stacktop));
        pthread->start_routine = start_routine;
        pthread->arg = arg;
        /* Initialize the uthread */
-       uthread_init((struct uthread*)pthread);
-       uthread_runnable((struct uthread*)pthread);
+       if (need_tls)
+               uth_attr.want_tls = TRUE;
+       uthread_init((struct uthread*)pthread, &uth_attr);
        *thread = pthread;
+       atomic_inc(&threads_total);
+       return 0;
+}
+
+int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+                   void *(*start_routine)(void *), void *arg)
+{
+       if (!__pthread_create(thread, attr, start_routine, arg))
+               pth_thread_runnable((struct uthread*)*thread);
        return 0;
 }
 
-int pthread_join(pthread_t thread, void** retval)
+/* Helper that all pthread-controlled yield paths call.  Just does some
+ * accounting.  This is another example of how the much-loathed (and loved)
+ * active queue is keeping us honest.  Need to export for sem and friends. */
+void __pthread_generic_yield(struct pthread_tcb *pthread)
+{
+       mcs_pdr_lock(&queue_lock);
+       threads_active--;
+       TAILQ_REMOVE(&active_queue, pthread, tq_next);
+       mcs_pdr_unlock(&queue_lock);
+}
+
+/* Callback/bottom half of join, called from __uthread_yield (vcore context).
+ * join_target is who we are trying to join on (and who is calling exit). */
+static void __pth_join_cb(struct uthread *uthread, void *arg)
+{
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       struct pthread_tcb *join_target = (struct pthread_tcb*)arg;
+       struct pthread_tcb *temp_pth = 0;
+       __pthread_generic_yield(pthread);
+       /* We're trying to join, yield til we get woken up */
+       pthread->state = PTH_BLK_JOINING;       /* could do this front-side */
+       /* Put ourselves in the join target's joiner slot.  If we get anything back,
+        * we lost the race and need to wake ourselves.  Syncs with __pth_exit_cb.*/
+       temp_pth = atomic_swap_ptr((void**)&join_target->joiner, pthread);
+       /* After that atomic swap, the pthread might be woken up (if it succeeded),
+        * so don't touch pthread again after that (this following if () is okay).*/
+       if (temp_pth) {         /* temp_pth != 0 means they exited first */
+               assert(temp_pth == join_target);        /* Sanity */
+               /* wake ourselves, not the exited one! */
+               printd("[pth] %08p already exit, rewaking ourselves, joiner %08p\n",
+                      temp_pth, pthread);
+               pth_thread_runnable(uthread);   /* wake ourselves */
+       }
+}
+
+int pthread_join(struct pthread_tcb *join_target, void **retval)
 {
-       struct pthread_tcb *caller = (struct pthread_tcb*)current_uthread;
        /* Not sure if this is the right semantics.  There is a race if we deref
-        * thread and he is already freed (which would have happened if he was
+        * join_target and he is already freed (which would have happened if he was
         * detached. */
-       if (thread->detached) {
+       if (join_target->detached) {
                printf("[pthread] trying to join on a detached pthread");
                return -1;
        }
        /* See if it is already done, to avoid the pain of a uthread_yield() (the
         * early check is an optimization, pth_thread_yield() handles the race). */
-       if (!thread->joiner) {
-               /* Time to join, set things up so pth_thread_yield() knows what to do */
-               caller->state = PTH_BLK_JOINING;
-               caller->join_target = thread;
-               uthread_yield(TRUE, pth_thread_yield, 0);
+       if (!join_target->joiner) {
+               uthread_yield(TRUE, __pth_join_cb, join_target);
                /* When we return/restart, the thread will be done */
        } else {
-               assert(thread->joiner == thread);       /* sanity check */
+               assert(join_target->joiner == join_target);     /* sanity check */
        }
        if (retval)
-               *retval = thread->retval;
-       free(thread);
+               *retval = join_target->retval;
+       free(join_target);
        return 0;
 }
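
For reference, a minimal create/join round trip under these semantics
(sketch; sum() is illustrative):

	static void *sum(void *arg)
	{
		long n = (long)arg;

		return (void*)(n * (n + 1) / 2);
	}

	void demo_join(void)
	{
		pthread_t t;
		void *ret;

		pthread_create(&t, NULL, sum, (void*)100);
		pthread_join(t, &ret);	/* sleeps via __pth_join_cb if needed */
		assert((long)ret == 5050);
	}
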
 
+/* Callback/bottom half of exit.  Syncs with __pth_join_cb.  Here's how it
+ * works: the slot for joiner is initially 0.  Joiners try to swap themselves
+ * into that spot.  Exiters try to put 'themselves' into it.  Whoever gets 0
+ * back won the race.  If the exiter lost the race, it must wake up the joiner
+ * (which was the value from temp_pth).  If the joiner lost the race, it must
+ * wake itself up, and for sanity reasons can ensure the value from temp_pth is
+ * the join target. */
+static void __pth_exit_cb(struct uthread *uthread, void *junk)
+{
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       struct pthread_tcb *temp_pth = 0;
+       __pthread_generic_yield(pthread);
+       /* Catch some bugs */
+       pthread->state = PTH_EXITING;
+       /* Destroy the pthread */
+       uthread_cleanup(uthread);
+       /* Cleanup, mirroring pthread_create() */
+       __pthread_free_stack(pthread);
+       /* TODO: race on detach state (see join) */
+       if (pthread->detached) {
+               free(pthread);
+       } else {
+               /* See if someone is joining on us.  If not, we're done (and the
+                * joiner will wake itself when it saw us there instead of 0). */
+               temp_pth = atomic_swap_ptr((void**)&pthread->joiner, pthread);
+               if (temp_pth) {
+                       /* they joined before we exited, we need to wake them */
+                       printd("[pth] %08p exiting, waking joiner %08p\n",
+                              pthread, temp_pth);
+                       pth_thread_runnable((struct uthread*)temp_pth);
+               }
+       }
+       /* If we were the last pthread, we exit for the whole process.  Keep in mind
+        * that thread0 is counted in this, so this will only happen if that thread
+        * calls pthread_exit(). */
+       if ((atomic_fetch_and_add(&threads_total, -1) == 1))
+               exit(0);
+}
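
Condensed, the joiner/exiter handshake is a single atomic swap on the
'joiner' slot; the two possible interleavings look like this (protocol
sketch, not real code paths):

	/* slot starts at 0.  Whoever swaps first sees 0 and does nothing
	 * more; whoever swaps second sees a TCB and must do the wakeup.
	 *
	 * Case 1: joiner first.
	 *   joiner:  temp = swap(&slot, joiner)  -> 0, so the joiner sleeps
	 *   exiter:  temp = swap(&slot, exiter)  -> joiner, wake the joiner
	 *
	 * Case 2: exiter first.
	 *   exiter:  temp = swap(&slot, exiter)  -> 0, exiter is done
	 *   joiner:  temp = swap(&slot, joiner)  -> exiter, so the joiner
	 *            wakes itself (the target already exited; see
	 *            __pth_join_cb) */
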
+
+static inline void pthread_exit_no_cleanup(void *ret)
+{
+       struct pthread_tcb *pthread = pthread_self();
+       pthread->retval = ret;
+       destroy_dtls();
+       while (SLIST_FIRST(&pthread->cr_stack))
+               pthread_cleanup_pop(FALSE);
+       uthread_yield(FALSE, __pth_exit_cb, 0);
+}
+
+void pthread_exit(void *ret)
+{
+       struct pthread_tcb *pthread = pthread_self();
+       while (SLIST_FIRST(&pthread->cr_stack))
+               pthread_cleanup_pop(TRUE);
+       pthread_exit_no_cleanup(ret);
+}
+
+/* Callback/bottom half of yield.  For those writing these pth callbacks, the
+ * minimum is call generic, set state (communicate with runnable), then do
+ * something that causes it to be runnable in the future (or right now). */
+static void __pth_yield_cb(struct uthread *uthread, void *junk)
+{
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       __pthread_generic_yield(pthread);
+       pthread->state = PTH_BLK_YIELDING;
+       /* just immediately restart it */
+       pth_thread_runnable(uthread);
+}
+
+/* Cooperative yielding of the processor, to allow other threads to run */
 int pthread_yield(void)
 {
-       struct pthread_tcb *caller = (struct pthread_tcb*)current_uthread;
-       caller->state = PTH_BLK_YIELDING;
-       uthread_yield(TRUE, pth_thread_yield, 0);
+       uthread_yield(TRUE, __pth_yield_cb, 0);
        return 0;
 }
 
+int pthread_cancel(pthread_t __th)
+{
+       fprintf(stderr, "Unsupported %s!", __FUNCTION__);
+       abort();
+       return -1;
+}
+
+void pthread_cleanup_push(void (*routine)(void *), void *arg)
+{
+       struct pthread_tcb *p = pthread_self();
+       struct pthread_cleanup_routine *r = malloc(sizeof(*r));
+       r->routine = routine;
+       r->arg = arg;
+       SLIST_INSERT_HEAD(&p->cr_stack, r, cr_next);
+}
+
+void pthread_cleanup_pop(int execute)
+{
+       struct pthread_tcb *p = pthread_self();
+       struct pthread_cleanup_routine *r = SLIST_FIRST(&p->cr_stack);
+       if (r) {
+               SLIST_REMOVE_HEAD(&p->cr_stack, cr_next);
+               if (execute)
+                       r->routine(r->arg);
+               free(r);
+       }
+}
+
 int pthread_mutexattr_init(pthread_mutexattr_t* attr)
 {
   attr->type = PTHREAD_MUTEX_DEFAULT;
@@ -560,6 +799,24 @@ int pthread_mutex_init(pthread_mutex_t* m, const pthread_mutexattr_t* attr)
   return 0;
 }
 
+/* Helper for spinning sync, returns TRUE if it is okay to keep spinning.
+ *
+ * Alternatives include:
+ *             old_count <= num_vcores() (barrier code, pass in old_count as *state,
+ *                                        but this only works if every awake pthread
+ *                                        will belong to the barrier).
+ *             just spin for a bit       (use *state to track spins)
+ *             FALSE                     (always safe)
+ *             etc...
+ * 'threads_ready' isn't too great since sometimes it'll be non-zero when it is
+ * about to become 0.  We really want "I have no threads waiting to run that
+ * aren't going to run on their own unless this core yields instead of spins". */
+/* TODO: consider making this a 2LS op */
+static inline bool safe_to_spin(unsigned int *state)
+{
+       return !threads_ready;
+}
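
The intended pattern, mirrored later by pthread_barrier_wait(): spin while it
is safe, then fall back to a real sleep (sketch; try_acquire() and block()
are hypothetical stand-ins for a fast path and a yield-based slow path):

	extern bool try_acquire(void);	/* hypothetical fast path */
	extern void block(void);	/* hypothetical: uthread_yield() based */

	void adaptive_wait(void)
	{
		unsigned int spin_state = 0;

		do {
			if (try_acquire())
				return;
			cpu_relax();
		} while (safe_to_spin(&spin_state));
		block();	/* no runnable work to steal; really sleep */
	}
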
+
 /* Set *spun to 0 when calling this the first time.  It will yield after 'spins'
  * calls.  Use this for adaptive mutexes and such. */
 static inline void spin_to_sleep(unsigned int spins, unsigned int *spun)
@@ -605,83 +862,148 @@ int pthread_mutex_destroy(pthread_mutex_t* m)
 
 int pthread_cond_init(pthread_cond_t *c, const pthread_condattr_t *a)
 {
-  c->attr = a;
-  memset(c->waiters,0,sizeof(c->waiters));
-  memset(c->in_use,0,sizeof(c->in_use));
-  c->next_waiter = 0;
-  return 0;
+       SLIST_INIT(&c->waiters);
+       spin_pdr_init(&c->spdr_lock);
+       if (a) {
+               c->attr_pshared = a->pshared;
+               c->attr_clock = a->clock;
+       } else {
+               c->attr_pshared = PTHREAD_PROCESS_PRIVATE;
+               c->attr_clock = 0;
+       }
+       return 0;
 }
 
 int pthread_cond_destroy(pthread_cond_t *c)
 {
-  return 0;
+       return 0;
+}
+
+static void swap_slists(struct pthread_list *a, struct pthread_list *b)
+{
+       struct pthread_list temp;
+       temp = *a;
+       *a = *b;
+       *b = temp;
+}
+
+static void wake_slist(struct pthread_list *to_wake)
+{
+       unsigned int nr_woken = 0;      /* assuming less than 4 bil threads */
+       struct pthread_tcb *pthread_i, *pth_temp;
+       /* Amortize the lock grabbing over all restartees */
+       mcs_pdr_lock(&queue_lock);
+       /* Do the work of pth_thread_runnable().  We're in uth context here, but I
+        * think it's okay.  When we need to (when locking) we drop into VC ctx, as
+        * far as the kernel and other cores are concerned. */
+       SLIST_FOREACH_SAFE(pthread_i, to_wake, sl_next, pth_temp) {
+               pthread_i->state = PTH_RUNNABLE;
+               nr_woken++;
+               TAILQ_INSERT_TAIL(&ready_queue, pthread_i, tq_next);
+       }
+       threads_ready += nr_woken;
+       mcs_pdr_unlock(&queue_lock);
+       vcore_request_more(threads_ready);
 }
 
 int pthread_cond_broadcast(pthread_cond_t *c)
 {
-  memset(c->waiters,0,sizeof(c->waiters));
-  return 0;
+       struct pthread_list restartees = SLIST_HEAD_INITIALIZER(restartees);
+       spin_pdr_lock(&c->spdr_lock);
+       swap_slists(&restartees, &c->waiters);
+       spin_pdr_unlock(&c->spdr_lock);
+       wake_slist(&restartees);
+       return 0;
 }
 
+/* spec says this needs to work regardless of whether or not it holds the mutex
+ * already. */
 int pthread_cond_signal(pthread_cond_t *c)
 {
-  int i;
-  for(i = 0; i < MAX_PTHREADS; i++)
-  {
-    if(c->waiters[i])
-    {
-      c->waiters[i] = 0;
-      break;
-    }
-  }
-  return 0;
+       struct pthread_tcb *pthread;
+       spin_pdr_lock(&c->spdr_lock);
+       pthread = SLIST_FIRST(&c->waiters);
+       if (!pthread) {
+               spin_pdr_unlock(&c->spdr_lock);
+               return 0;
+       }
+       SLIST_REMOVE_HEAD(&c->waiters, sl_next);
+       spin_pdr_unlock(&c->spdr_lock);
+       pth_thread_runnable((struct uthread*)pthread);
+       return 0;
 }
 
-int pthread_cond_wait(pthread_cond_t *c, pthread_mutex_t *m)
+/* Communicate btw cond_wait and its callback */
+struct cond_junk {
+       pthread_cond_t                          *c;
+       pthread_mutex_t                         *m;
+};
+
+/* Callback/bottom half of cond wait.  For those writing these pth callbacks,
+ * the minimum is call generic, set state (communicate with runnable), then do
+ * something that causes it to be runnable in the future (or right now). */
+static void __pth_wait_cb(struct uthread *uthread, void *junk)
 {
-  uint32_t old_waiter = c->next_waiter;
-  uint32_t my_waiter = c->next_waiter;
-  
-  //allocate a slot
-  while (atomic_swap_u32(& (c->in_use[my_waiter]), SLOT_IN_USE) == SLOT_IN_USE)
-  {
-    my_waiter = (my_waiter + 1) % MAX_PTHREADS;
-    assert (old_waiter != my_waiter);  // do not want to wrap around
-  }
-  c->waiters[my_waiter] = WAITER_WAITING;
-  c->next_waiter = (my_waiter+1) % MAX_PTHREADS;  // race on next_waiter but ok, because it is advisary
-
-  pthread_mutex_unlock(m);
-
-  volatile int* poll = &c->waiters[my_waiter];
-  while(*poll);
-  c->in_use[my_waiter] = SLOT_FREE;
-  pthread_mutex_lock(m);
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       pthread_cond_t *c = ((struct cond_junk*)junk)->c;
+       pthread_mutex_t *m = ((struct cond_junk*)junk)->m;
+       /* this removes us from the active list; we can reuse next below */
+       __pthread_generic_yield(pthread);
+       pthread->state = PTH_BLK_MUTEX;
+       spin_pdr_lock(&c->spdr_lock);
+       SLIST_INSERT_HEAD(&c->waiters, pthread, sl_next);
+       spin_pdr_unlock(&c->spdr_lock);
+       pthread_mutex_unlock(m);
+}
 
-  return 0;
+int pthread_cond_wait(pthread_cond_t *c, pthread_mutex_t *m)
+{
+       struct cond_junk local_junk;
+       local_junk.c = c;
+       local_junk.m = m;
+       uthread_yield(TRUE, __pth_wait_cb, &local_junk);
+       pthread_mutex_lock(m);
+       return 0;
 }
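
As with any condvar, callers should recheck their predicate in a loop: a
broadcast (or a signal that raced with another waiter) can wake a thread
whose condition is no longer true.  A sketch, using explicit init since
that's what this file provides:

	static pthread_mutex_t lock;	/* pthread_mutex_init(&lock, 0) */
	static pthread_cond_t cond;	/* pthread_cond_init(&cond, 0) */
	static bool ready = FALSE;

	void consumer(void)
	{
		pthread_mutex_lock(&lock);
		while (!ready)	/* recheck after every wakeup */
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	void producer(void)
	{
		pthread_mutex_lock(&lock);
		ready = TRUE;
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&cond);	/* legal with or without the mutex */
	}
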
 
 int pthread_condattr_init(pthread_condattr_t *a)
 {
-  a = PTHREAD_PROCESS_PRIVATE;
-  return 0;
+       a->pshared = PTHREAD_PROCESS_PRIVATE;
+       a->clock = 0;
+       return 0;
 }
 
 int pthread_condattr_destroy(pthread_condattr_t *a)
 {
-  return 0;
+       return 0;
+}
+
+int pthread_condattr_getpshared(pthread_condattr_t *a, int *s)
+{
+       *s = a->pshared;
+       return 0;
 }
 
 int pthread_condattr_setpshared(pthread_condattr_t *a, int s)
 {
-  a->pshared = s;
-  return 0;
+       a->pshared = s;
+       if (s == PTHREAD_PROCESS_SHARED) {
+               printf("Warning: we don't do shared pthread condvars btw diff MCPs\n");
+               return -1;
+       }
+       return 0;
 }
 
-int pthread_condattr_getpshared(pthread_condattr_t *a, int *s)
+int pthread_condattr_getclock(const pthread_condattr_t *attr,
+                              clockid_t *clock_id)
 {
-  *s = a->pshared;
-  return 0;
+       *clock_id = attr->clock;
+       return 0;
+}
+
+int pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
+{
+       printf("Warning: we don't do pthread condvar clock stuff\n");
+       attr->clock = clock_id;
+       return 0;
+}
 
 pthread_t pthread_self()
@@ -694,17 +1016,6 @@ int pthread_equal(pthread_t t1, pthread_t t2)
   return t1 == t2;
 }
 
-/* This function cannot be migrated to a different vcore by the userspace
- * scheduler.  Will need to sort that shit out. */
-void pthread_exit(void *ret)
-{
-       struct pthread_tcb *pthread = pthread_self();
-       pthread->retval = ret;
-       /* So our pth_thread_yield knows we want to exit */
-       pthread->state = PTH_EXITING;
-       uthread_yield(FALSE, pth_thread_yield, 0);
-}
-
 int pthread_once(pthread_once_t* once_control, void (*init_routine)(void))
 {
   if (atomic_swap_u32(once_control, 1) == 0)
@@ -712,45 +1023,118 @@ int pthread_once(pthread_once_t* once_control, void (*init_routine)(void))
   return 0;
 }
 
-int pthread_barrier_init(pthread_barrier_t* b, const pthread_barrierattr_t* a, int count)
+int pthread_barrier_init(pthread_barrier_t *b,
+                         const pthread_barrierattr_t *a, int count)
 {
-  b->nprocs = b->count = count;
-  b->sense = 0;
-  pthread_mutex_init(&b->pmutex, 0);
-  return 0;
+       b->total_threads = count;
+       b->sense = 0;
+       atomic_set(&b->count, count);
+       spin_pdr_init(&b->lock);
+       SLIST_INIT(&b->waiters);
+       b->nr_waiters = 0;
+       return 0;
 }
 
-int pthread_barrier_wait(pthread_barrier_t* b)
-{
-  unsigned int spinner = 0;
-  int ls = !b->sense;
+struct barrier_junk {
+       pthread_barrier_t                               *b;
+       int                                                             ls;
+};
 
-  pthread_mutex_lock(&b->pmutex);
-  int count = --b->count;
-  pthread_mutex_unlock(&b->pmutex);
+/* Callback/bottom half of barrier. */
+static void __pth_barrier_cb(struct uthread *uthread, void *junk)
+{
+       struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+       pthread_barrier_t *b = ((struct barrier_junk*)junk)->b;
+       int ls = ((struct barrier_junk*)junk)->ls;
+       /* Removes from active list, we can reuse.  must also restart */
+       __pthread_generic_yield(pthread);
+       /* TODO: if we used a trylock, we could bail as soon as we see sense */
+       spin_pdr_lock(&b->lock);
+       /* If sense is ls (our free value), we lost the race and shouldn't sleep */
+       if (b->sense == ls) {
+               /* TODO: i'd like to fast-path the wakeup, skipping pth_runnable */
+               pthread->state = PTH_BLK_YIELDING;      /* not sure which state for this */
+               spin_pdr_unlock(&b->lock);
+               pth_thread_runnable(uthread);
+               return;
+       }
+       /* otherwise, we sleep */
+       pthread->state = PTH_BLK_MUTEX; /* TODO: consider ignoring this */
+       SLIST_INSERT_HEAD(&b->waiters, pthread, sl_next);
+       b->nr_waiters++;
+       spin_pdr_unlock(&b->lock);
+}
 
-  if(count == 0)
-  {
-    printd("Thread %d is last to hit the barrier, resetting...\n", pthread_self()->id);
-    b->count = b->nprocs;
-       wmb();
-    b->sense = ls;
-    return PTHREAD_BARRIER_SERIAL_THREAD;
-  }
-  else
-  {
-    while(b->sense != ls) {
-      cpu_relax();
-      spin_to_sleep(PTHREAD_BARRIER_SPINS, &spinner);
-    }
-    return 0;
-  }
+/* We assume that the same threads participating in the barrier this time will
+ * also participate next time.  Imagine a thread stopped right after its fetch
+ * and add - we know it is coming through eventually.  We finish and change the
+ * sense, which should allow the delayed thread to eventually break through.
+ * But if another n threads come in first, we'll set the sense back to the old
+ * value, thereby catching the delayed thread til the next barrier. 
+ *
+ * A note on preemption: if any thread gets preempted and it is never dealt
+ * with, eventually we deadlock, with all threads waiting on the last one to
+ * enter (and any stragglers from one run will be the last in the next run).
+ * One way or another, we need to handle preemptions.  The current 2LS requests
+ * an IPI for a preempt, so we'll be fine.  Any other strategies will need to
+ * consider how barriers work.  Any time we sleep, we'll be okay (since that
+ * frees up our core to handle preemptions/run other threads). */
+int pthread_barrier_wait(pthread_barrier_t *b)
+{
+       unsigned int spin_state = 0;
+       int ls = !b->sense;     /* when b->sense is the value we read, then we're free*/
+       struct pthread_list restartees = SLIST_HEAD_INITIALIZER(restartees);
+       struct pthread_tcb *pthread_i;
+       struct barrier_junk local_junk;
+       
+       long old_count = atomic_fetch_and_add(&b->count, -1);
+
+       if (old_count == 1) {
+               printd("Thread %d is last to hit the barrier, resetting...\n",
+                      pthread_self()->id);
+               /* TODO: we might want to grab the lock right away, so a few can
+                * short-circuit faster? */
+               atomic_set(&b->count, b->total_threads);
+               /* we still need to maintain ordering btw count and sense, in case
+                * another thread doesn't sleep (if we wrote sense first, they could
+                * break out, race around, and muck with count before it is time) */
+               /* wmb(); handled by the spin lock */
+               spin_pdr_lock(&b->lock);
+               /* Sense is only protected in addition to decisions to sleep */
+               b->sense = ls;  /* set to free everyone */
+               /* All access to nr_waiters is protected by the lock */
+               if (!b->nr_waiters) {
+                       spin_pdr_unlock(&b->lock);
+                       return PTHREAD_BARRIER_SERIAL_THREAD;
+               }
+               swap_slists(&restartees, &b->waiters);
+               b->nr_waiters = 0;
+               spin_pdr_unlock(&b->lock);
+               wake_slist(&restartees);
+               return PTHREAD_BARRIER_SERIAL_THREAD;
+       } else {
+               /* Spin if there are no other threads to run.  No sense sleeping */
+               do {
+                       if (b->sense == ls)
+                               return 0;
+                       cpu_relax();
+               } while (safe_to_spin(&spin_state));
+
+               /* Try to sleep, when we wake/return, we're free to go */
+               local_junk.b = b;
+               local_junk.ls = ls;
+               uthread_yield(TRUE, __pth_barrier_cb, &local_junk);
+               // assert(b->sense == ls);
+               return 0;
+       }
 }
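
Typical use, consistent with the 'same threads every round' assumption above
(sketch; NR_ROUNDS and work() are illustrative):

	static pthread_barrier_t barrier;	/* init'd with count = nr workers */

	static void *barrier_worker(void *arg)
	{
		for (int round = 0; round < NR_ROUNDS; round++) {
			work(round);	/* hypothetical per-round work */
			/* exactly one caller per round gets the SERIAL return */
			if (pthread_barrier_wait(&barrier) ==
			    PTHREAD_BARRIER_SERIAL_THREAD)
				printf("round %d done\n", round);
		}
		return 0;
	}
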
 
-int pthread_barrier_destroy(pthread_barrier_t* b)
+int pthread_barrier_destroy(pthread_barrier_t *b)
 {
-  pthread_mutex_destroy(&b->pmutex);
-  return 0;
+       assert(SLIST_EMPTY(&b->waiters));
+       assert(!b->nr_waiters);
+       /* Free any locks (if we end up using an MCS) */
+       return 0;
 }
 
 int pthread_detach(pthread_t thread)
@@ -759,3 +1143,171 @@ int pthread_detach(pthread_t thread)
        thread->detached = TRUE;
        return 0;
 }
+
+int pthread_kill(pthread_t thread, int signo)
+{
+       return uthread_signal(&thread->uthread, signo);
+}
+
+int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+       int ret = sigprocmask(how, set, oset);
+
+       /* Ensures any pending signals we just unmasked get processed. */
+       if (set && ret == 0)
+               pthread_yield();
+       return ret;
+}
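
pthread_kill() rides on uthread_signal(); the handler runs the next time the
target thread is scheduled (see the uthread_prep_pending_signals() calls in
pth_sched_entry()).  A sketch, assuming the usual libc signal() registration
is wired through parlib's signal code:

	static void on_usr1(int signo)
	{
		/* runs in the target pthread's context on its next restart */
	}

	void poke(pthread_t target)
	{
		signal(SIGUSR1, on_usr1);	/* assumption: libc wiring */
		pthread_kill(target, SIGUSR1);
	}
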
+
+int pthread_sigqueue(pthread_t *thread, int sig, const union sigval value)
+{
+       printf("pthread_sigqueue is not yet implemented!");
+       return -1;
+}
+
+int pthread_key_create(pthread_key_t *key, void (*destructor)(void*))
+{
+       *key = dtls_key_create(destructor);
+       assert(key);
+       return 0;
+}
+
+int pthread_key_delete(pthread_key_t key)
+{
+       dtls_key_delete(key);
+       return 0;
+}
+
+void *pthread_getspecific(pthread_key_t key)
+{
+       return get_dtls(key);
+}
+
+int pthread_setspecific(pthread_key_t key, const void *value)
+{
+       set_dtls(key, (void*)value);
+       return 0;
+}
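
These map straight onto parlib's dynamic TLS (dtls); usage is the standard
pthread pattern (sketch; the destructor runs at thread exit via
destroy_dtls()):

	static pthread_key_t buf_key;	/* pthread_key_create(&buf_key, free) */

	char *my_buf(void)
	{
		char *buf = pthread_getspecific(buf_key);

		if (!buf) {
			buf = malloc(64);
			pthread_setspecific(buf_key, buf);
		}
		return buf;
	}
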
+
+
+/* Scheduling Stuff */
+
+static bool policy_is_supported(int policy)
+{
+       /* As our scheduler changes, we can add more policies here */
+       switch (policy) {
+               case SCHED_FIFO:
+                       return TRUE;
+               default:
+                       return FALSE;
+       }
+}
+
+int pthread_attr_setschedparam(pthread_attr_t *attr,
+                               const struct sched_param *param)
+{
+       /* The set of acceptable priorities are based on the scheduling policy.
+        * We'll just accept any old number, since we might not know the policy
+        * yet.  I didn't see anything in the man pages saying attr had to have a
+        * policy set before setting priority. */
+       attr->sched_priority = param->sched_priority;
+       return 0;
+}
+
+int pthread_attr_getschedparam(pthread_attr_t *attr,
+                               struct sched_param *param)
+{
+       param->sched_priority = attr->sched_priority;
+       return 0;
+}
+
+int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
+{
+       if (!policy_is_supported(policy))
+               return -EINVAL;
+       attr->sched_policy = policy;
+       return 0;
+}
+
+int pthread_attr_getschedpolicy(pthread_attr_t *attr, int *policy)
+{
+       *policy = attr->sched_policy;
+       return 0;
+}
+
+/* We only support SCOPE_PROCESS, so we don't even use the attr. */
+int pthread_attr_setscope(pthread_attr_t *attr, int scope)
+{
+       if (scope != PTHREAD_SCOPE_PROCESS)
+               return -ENOTSUP;
+       return 0;
+}
+
+int pthread_attr_getscope(pthread_attr_t *attr, int *scope)
+{
+       *scope = PTHREAD_SCOPE_PROCESS;
+       return 0;
+}
+
+/* Inheritance refers to policy, priority, scope */
+int pthread_attr_setinheritsched(pthread_attr_t *attr,
+                                 int inheritsched)
+{
+       switch (inheritsched) {
+               case PTHREAD_INHERIT_SCHED:
+               case PTHREAD_EXPLICIT_SCHED:
+                       break;
+               default:
+                       return -EINVAL;
+       }
+       attr->sched_inherit = inheritsched;
+       return 0;
+}
+
+int pthread_attr_getinheritsched(const pthread_attr_t *attr,
+                                 int *inheritsched)
+{
+       *inheritsched = attr->sched_inherit;
+       return 0;
+}
+
+int pthread_setschedparam(pthread_t thread, int policy,
+                           const struct sched_param *param)
+{
+       if (!policy_is_supported(policy))
+               return -EINVAL;
+       thread->sched_policy = policy;
+       /* We actually could check if the priority falls in the range of the
+        * specified policy here, since we have both policy and priority. */
+       thread->sched_priority = param->sched_priority;
+       return 0;
+}
+
+int pthread_getschedparam(pthread_t thread, int *policy,
+                           struct sched_param *param)
+{
+       *policy = thread->sched_policy;
+       param->sched_priority = thread->sched_priority;
+       return 0;
+}
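
Putting the attr plumbing together: a thread that opts out of inheriting its
parent's policy (sketch; per policy_is_supported(), only SCHED_FIFO is
currently accepted):

	int spawn_fifo_thread(pthread_t *t, void *(*fn)(void *), void *arg)
	{
		pthread_attr_t attr;
		struct sched_param param = { .sched_priority = 0 };

		pthread_attr_init(&attr);
		/* without EXPLICIT_SCHED, policy/priority come from the parent */
		pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
		pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
		pthread_attr_setschedparam(&attr, &param);
		return pthread_create(t, &attr, fn, arg);
	}
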
+
+
+/* Unsupported Stuff */
+
+int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
+                                       const struct timespec *__restrict
+                                       __abstime)
+{
+       fprintf(stderr, "Unsupported %s!", __FUNCTION__);
+       abort();
+       return -1;
+}
+
+int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
+                                  pthread_mutex_t *__restrict __mutex,
+                                  const struct timespec *__restrict __abstime)
+{
+       fprintf(stderr, "Unsupported %s!", __FUNCTION__);
+       abort();
+       return -1;
+}