Kick the VMM 2LS when enqueueing a thread
author: Barret Rhoden <brho@cs.berkeley.edu>
Fri, 5 Aug 2016 00:39:11 +0000 (17:39 -0700)
committer: Barret Rhoden <brho@cs.berkeley.edu>
Wed, 10 Aug 2016 19:36:04 +0000 (15:36 -0400)
Previously, we'd only run the 2LS logic on sched_entry().  If you
created threads and didn't do something triggering sched_entry() --
such as calling uthread_sleep_forever(), making a system call, or
taking a notify/IPI -- then we'd never run that logic and you could
deadlock.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
user/vmm/sched.c

index 5ecdf6d..5286796 100644 (file)
@@ -198,24 +198,35 @@ static void yield_current_uth(void)
        enqueue_vmm_thread(vth);
 }
 
-static void __attribute__((noreturn)) vmm_sched_entry(void)
+/* Helper, tries to get the right number of vcores.  Returns TRUE if we think we
+ * have enough, FALSE otherwise.
+ *
+ * TODO: this doesn't handle a lot of issues, like preemption, how to
+ * run/yield our vcores, dynamic changes in the number of runnables, where
+ * to send events, how to avoid interfering with gpcs, etc. */
+static bool try_to_get_vcores(void)
 {
-       struct vmm_thread *vth;
        int nr_vcores_wanted = desired_nr_vcores();
        bool have_enough = nr_vcores_wanted <= num_vcores();
 
-       /* TODO: this doesn't handle a lot of issues, like preemption, how to
-        * run/yield our vcores, dynamic changes in the number of runnables, where
-        * to send events, how to avoid interfering with gpcs, etc. */
        if (have_enough) {
                vcore_tick_disable();
-       } else {
-               vcore_tick_enable(vmm_sched_period_usec);
-               vcore_request_total(nr_vcores_wanted);
-               if (vcore_tick_poll()) {
-                       /* slightly less than ideal: we grab the queue lock twice */
-                       yield_current_uth();
-               }
+               return TRUE;
+       }
+       vcore_tick_enable(vmm_sched_period_usec);
+       vcore_request_total(nr_vcores_wanted);
+       return FALSE;
+}
+
+static void __attribute__((noreturn)) vmm_sched_entry(void)
+{
+       struct vmm_thread *vth;
+       bool have_enough;
+
+       have_enough = try_to_get_vcores();
+       if (!have_enough && vcore_tick_poll()) {
+               /* slightly less than ideal: we grab the queue lock twice */
+               yield_current_uth();
        }
        if (current_uthread)
                run_current_uthread();
@@ -539,6 +550,7 @@ static void enqueue_vmm_thread(struct vmm_thread *vth)
                break;
        }
        spin_pdr_unlock(&queue_lock);
+       try_to_get_vcores();
 }
 
 static struct vmm_thread *alloc_vmm_thread(struct virtual_machine *vm, int type)