net: Make select() not spurious
[akaros.git] / user / vmm / sched.c
index e9d8ab8..32c420e 100644
@@ -6,6 +6,7 @@
 
 #include <vmm/sched.h>
 #include <vmm/vmm.h>
+#include <vmm/vthread.h>
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <assert.h>
@@ -296,12 +297,22 @@ static void __attribute__((noreturn)) vmm_sched_entry(void)
 {
        struct vmm_thread *vth;
 
-       if (sched_is_greedy())
+       if (sched_is_greedy()) {
                vth = sched_pick_thread_greedy();
-       else
+               if (!vth) {
+                       /* sys_halt_core will return, but we need to restart the vcore.  We
+                        * might have woken due to an event, and we'll need to run
+                        * handle_events() and the other things uthread code deals with. */
+                       if (vcore_id() == 0)
+                               sys_halt_core(0);
+                       /* In greedy mode, yield will abort and we'll just restart */
+                       vcore_yield_or_restart();
+               }
+       } else {
                vth = sched_pick_thread_nice();
-       if (!vth)
-               vcore_yield_or_restart();
+               if (!vth)
+                       vcore_yield_or_restart();
+       }
        stats_run_vth(vth);
        run_uthread((struct uthread*)vth);
 }
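
With greedy scheduling, vcores are dedicated to the VM, so an idle vcore has nowhere useful to yield to.  The hunk above therefore halts vcore 0 when it finds no work (the enqueue path at the bottom of this diff wakes it back up with vcore_wake(0, false)) and otherwise falls through to vcore_yield_or_restart(), which in greedy mode aborts the yield and just restarts vcore context.  A sketch of the resulting idle loop, assuming sys_halt_core(0) returns once a notification arrives (as the comment implies); in the real code the "loop" is the vcore restarting into vmm_sched_entry():

	for (;;) {
		vth = sched_pick_thread_greedy();
		if (vth)
			break;			/* found work: go run it */
		if (vcore_id() == 0)
			sys_halt_core(0);	/* sleep until a notification */
		/* restart vcore context so handle_events() etc. get to run */
		vcore_yield_or_restart();
	}
	stats_run_vth(vth);
	run_uthread((struct uthread*)vth);
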
@@ -463,24 +474,43 @@ static void task_thread_dtor(void *obj, void *priv)
 {
        struct task_thread *tth = (struct task_thread*)obj;
 
+       uthread_cleanup((struct uthread*)tth);
        __free_stack(tth->stacktop, tth->stacksize);
 }
 
-static void vmm_thread_exited(struct uthread *uth)
+static void task_thread_exit(struct task_thread *tth)
 {
-       struct vmm_thread *vth = (struct vmm_thread*)uth;
-       struct task_thread *tth = (struct task_thread*)uth;
-
-       /* Catch bugs.  Right now, only tasks threads can exit. */
-       assert(vth->type == VMM_THREAD_TASK);
+       struct uthread *uth = (struct uthread*)tth;
 
-       acct_thread_blocked((struct vmm_thread*)tth);
-       uthread_cleanup(uth);
        if (uth->flags & UTHREAD_IS_THREAD0)
                return;
        kmem_cache_free(task_thread_cache, tth);
 }
 
+static void ctlr_thread_exit(struct ctlr_thread *cth)
+{
+       __vthread_exited((struct vthread*)cth->buddy);
+}
+
+static void vmm_thread_exited(struct uthread *uth)
+{
+       struct vmm_thread *vth = (struct vmm_thread*)uth;
+
+       assert(vth->type != VMM_THREAD_GUEST);
+
+       acct_thread_blocked(vth);
+       switch (vth->type) {
+       case VMM_THREAD_TASK:
+               task_thread_exit((struct task_thread*)uth);
+               break;
+       case VMM_THREAD_CTLR:
+               ctlr_thread_exit((struct ctlr_thread*)uth);
+               break;
+       case VMM_THREAD_GUEST:
+               panic("Guest threads shouldn't be able to exit");
+       }
+}
+
 static void destroy_guest_thread(struct guest_thread *gth)
 {
        struct ctlr_thread *cth = gth->buddy;
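
vmm_thread_exited() now dispatches on the thread type instead of asserting that only task threads can exit: task threads free themselves (unless they are thread0), and controller threads report the exit of their buddy guest thread via __vthread_exited().  The dispatch leans on every 2LS thread embedding a struct vmm_thread tagged with its type.  Roughly (the exact layout is in vmm/sched.h; fields beyond these are elided):

	struct vmm_thread {
		struct uthread		uth;	/* first member: enables the casts above */
		int			type;	/* VMM_THREAD_{GUEST,CTLR,TASK} */
		/* ... */
	};

	struct ctlr_thread {
		struct vmm_thread	vmm_thr;
		struct guest_thread	*buddy;	/* the guest core this thread services */
		/* ... */
	};
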
@@ -492,9 +522,9 @@ static void destroy_guest_thread(struct guest_thread *gth)
        free(gth);
 }
 
-static struct guest_thread *create_guest_thread(struct virtual_machine *vm,
-                                                unsigned int gpcoreid,
-                                                struct vmm_gpcore_init *gpci)
+struct guest_thread *create_guest_thread(struct virtual_machine *vm,
+                                         unsigned int gpcoreid,
+                                         struct vmm_gpcore_init *gpci)
 {
        struct guest_thread *gth;
        struct ctlr_thread *cth;
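
create_guest_thread() loses its static qualifier so code outside sched.c can create guest threads; given the new vmm/vthread.h include, the intended caller is presumably the vthread code, with a declaration added to a shared header.  A hypothetical call site, using only names from this diff:

	struct guest_thread *gth;

	gth = create_guest_thread(vm, gpcoreid, &gpci);
	if (!gth)
		return -1;	/* allocation or init failed */
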
@@ -543,7 +573,7 @@ static void ev_handle_diag(struct event_msg *ev_msg, unsigned int ev_type,
 
        fprintf(stderr, "\nSCHED stats:\n---------------\n");
        for (int i = 0; i < vm->nr_gpcs; i++) {
-               gth = vm->gths[i];
+               gth = gpcid_to_gth(vm, i);
                cth = gth->buddy;
                fprintf(stderr, "\tGPC %2d: %lu resched, %lu gth runs, %lu ctl runs, %lu user-handled vmexits\n",
                                i,
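
The stats dump no longer reads vm->gths[i] directly; every lookup goes through gpcid_to_gth().  Paired with renaming the field to __gths and publishing it behind a wmb() in vmm_init() (next hunk), the accessor gives a single choke point where the read side can be ordered against that publication.  A minimal sketch of what such an accessor could look like; the real helper lives in a header, and the barrier choice here is an assumption:

	static inline struct guest_thread *gpcid_to_gth(struct virtual_machine *vm,
	                                                unsigned int gpcoreid)
	{
		struct guest_thread **gths = ACCESS_ONCE(vm->__gths);

		rmb();	/* pairs with vmm_init()'s wmb(): gths[] contents are visible */
		return gths[gpcoreid];
	}
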
@@ -597,7 +627,8 @@ int vmm_init(struct virtual_machine *vm, struct vmm_gpcore_init *gpcis,
                        return -1;
                }
        }
-       vm->gths = gths;
+       wmb(); /* All gths posted before advertising. */
+       vm->__gths = gths;
        uthread_mcp_init();
        register_ev_handler(EV_FREE_APPLE_PIE, ev_handle_diag, NULL);
        if (sched_is_greedy()) {
@@ -605,7 +636,8 @@ int vmm_init(struct virtual_machine *vm, struct vmm_gpcore_init *gpcis,
                assert(greedy_rnbl_guests);
                vcore_request_total(sched_nr_greedy_cores());
                syscall(SYS_vmm_ctl, VMM_CTL_SET_EXITS,
-                       syscall(SYS_vmm_ctl, VMM_CTL_GET_EXITS) & ~VMM_CTL_EXIT_HALT);
+                       syscall(SYS_vmm_ctl, VMM_CTL_GET_EXITS) &
+                               ~(VMM_CTL_EXIT_HALT | VMM_CTL_EXIT_MWAIT));
        }
        return 0;
 }
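
In greedy mode the guest's cores are dedicated, so a HLT or MWAIT in the guest has no one to hand the core to; this hunk stops MWAIT (in addition to HALT) from exiting to the VMM, letting the guest idle natively.  The exits setting is a bitmask manipulated with a read-modify-write through SYS_vmm_ctl, e.g. the symmetric (hypothetical) call to turn halt exits back on:

	syscall(SYS_vmm_ctl, VMM_CTL_SET_EXITS,
	        syscall(SYS_vmm_ctl, VMM_CTL_GET_EXITS) | VMM_CTL_EXIT_HALT);
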
@@ -732,6 +764,8 @@ static void enqueue_vmm_thread(struct vmm_thread *vth)
                spin_pdr_lock(&queue_lock);
                TAILQ_INSERT_TAIL(&rnbl_tasks, vth, tq_next);
                spin_pdr_unlock(&queue_lock);
+               if (sched_is_greedy())
+                       vcore_wake(0, false);
                break;
        default:
                panic("Bad vmm_thread type %p\n", vth->type);