Fix a deadlock bug in MCS-PDR locks
diff --git a/user/parlib/thread0_sched.c b/user/parlib/thread0_sched.c
index 6246dc6..64a7842 100644
@@ -29,6 +29,7 @@ struct schedule_ops thread0_2ls_ops = {
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
+       .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
 };
 
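The new .thread_paused op simply reuses thread0_thread_runnable: thread0 keeps
no run queue, so a thread that parlib paused only needs its blocked state
cleared to be picked back up by the scheduler loop. Below is a caller-side
sketch of how the op is expected to be used; sched_ops is parlib's global ops
pointer, everything else here is illustrative, not parlib's actual code:

	/* Sketch only: handing a paused uthread back to the 2LS.  Before
	 * this patch, thread0_2ls_ops.thread_paused was unset, so a path
	 * that invoked it would strand thread0. */
	static void example_on_uthread_paused(struct uthread *uth)
	{
		assert(sched_ops->thread_paused);
		/* For thread0, this presumably clears is_blocked. */
		sched_ops->thread_paused(uth);
	}
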
@@ -42,10 +43,21 @@ struct thread0_info {
        bool                                            is_blocked;
 };
 static struct thread0_info thread0_info;
+static struct event_queue *sysc_evq;
+
+static void thread0_handle_syscall(struct event_msg *ev_msg,
+                                   unsigned int ev_type, void *data)
+{
+       thread0_info.is_blocked = FALSE;
+}
 
 void thread0_lib_init(void)
 {
        memset(&thread0_info, 0, sizeof(thread0_info));
+       /* We don't care about the message, so use a bitmap mbox, not a UCQ */
+       sysc_evq = get_eventq(EV_MBOX_BITMAP);
+       sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
+       register_ev_handler(EV_SYSCALL, thread0_handle_syscall, 0);
 }
 
 /* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
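
thread0_lib_init() now creates a private event queue for syscall completion:
EV_MBOX_BITMAP gives a payload-free bitmap mailbox, which is all
thread0_handle_syscall() needs since it ignores the message and only clears
is_blocked, while EVENT_INDIR | EVENT_WAKEUP ensures a blocked SCP is actually
woken to process the event. A sketch of where the handler fires, assuming (as
in parlib) that events are drained from vcore context via handle_events(); the
function below is illustrative, not the real vcore entry:

	/* Illustrative flow: drain events, which dispatches EV_SYSCALL to
	 * thread0_handle_syscall() and may clear is_blocked, then fall into
	 * the scheduler loop, which can now run thread0 again. */
	void example_vcore_entry(void)
	{
		handle_events(vcore_id());
		thread0_sched_entry();	/* from this file; never returns */
	}
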
@@ -53,11 +65,13 @@ static void thread0_sched_entry(void)
 {
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
+               uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
        while (1) {
                if (!thread0_info.is_blocked) {
+                       uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
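
Both run paths now call uthread_prep_pending_signals() before popping into the
uthread, so signals that arrived while the thread was off-CPU get delivered on
its next run instead of sitting pending. A rough sketch of the pattern that
call implements; every name and the flat-bitmask representation below are
hypothetical, not parlib's actual signal state:

	/* Hypothetical sketch of "prep pending signals": find deliverable
	 * signals and arrange for their handlers to run on thread start. */
	struct example_uth {
		unsigned long pending_sigs;	/* bitmask of posted signals */
		unsigned long masked_sigs;	/* bitmask of blocked signals */
	};

	/* Hypothetical helper, e.g. rewrites the saved context. */
	static void arrange_sig_handler_run(struct example_uth *uth,
	                                    unsigned long sigs);

	static void example_prep_signals(struct example_uth *uth)
	{
		unsigned long deliverable =
			uth->pending_sigs & ~uth->masked_sigs;

		if (deliverable)
			arrange_sig_handler_run(uth, deliverable);
	}
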
@@ -69,30 +83,9 @@ static void thread0_sched_entry(void)
 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
 {
        struct syscall *sysc = (struct syscall*)arg;
-       /* We're in vcore context.  Regardless of what we do here, we'll pop back
-        * into vcore_entry, just like any uthread_yield.  We don't have a 2LS,
-        * but we always have one uthread: the SCP's thread0.  Note that at this
-        * point, current_uthread is still set, but will be cleared as soon as the
-        * callback returns (and before we start over in vcore_entry).
-        *
-        * If notif_pending is already set (due to a concurrent signal), we'll fail
-        * to yield.  Once in VC ctx, we'll handle any other signals/events that
-        * arrived, then restart the uthread that issued the syscall, which if the
-        * syscall isn't done yet, will just blockon again.
-        *
-        * The one trick is that we don't want to register the evq twice.  The way
-        * register_evq currently works, if an SC completed (SC_DONE) while we were
-        * registering, we could end up clearing sysc->ev_q before the kernel sees
-        * it.  We'll use u_data to track whether we registered or not. */
-       #define U_DATA_BLOB ((void*)0x55555555)
-       if ((sysc->u_data == U_DATA_BLOB)
-           || register_evq(sysc, &__ros_scp_simple_evq)) {
-               sysc->u_data = U_DATA_BLOB;
-               /* Sending false for now - we want to signal proc code that we want to
-                * wait (piggybacking on the MCP meaning of this variable).  If
-                * notif_pending is set, the kernel will immediately return us. */
-               __ros_syscall_noerrno(SYS_yield, FALSE, 0, 0, 0, 0, 0);
-       }
+       thread0_thread_has_blocked(uthread, 0);
+       if (!register_evq(sysc, sysc_evq))
+               thread0_thread_runnable(uthread);
 }
 
 static void thread0_thread_refl_fault(struct uthread *uthread,
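
The rewritten blockon drops the u_data/yield dance: mark thread0 blocked
first, then register for the completion event. A false return from
register_evq() means the syscall already finished, so no EV_SYSCALL event will
ever arrive, and the thread must be made runnable on the spot. My reading of
why that fallback is the crux, as a comment sketch:

	/* The wakeup contract, as I read the patch:
	 *
	 *	thread0_thread_has_blocked(uth, 0);	// is_blocked = TRUE
	 *	if (!register_evq(sysc, sysc_evq))	// SC_DONE beat us:
	 *		thread0_thread_runnable(uth);	// no event coming
	 *
	 * Without the fallback, a syscall completing during registration
	 * would leave is_blocked set with no event ever delivered, and
	 * thread0_sched_entry() would spin in its while (1) loop forever. */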