Removes __proc_set_allcores()
diff --git a/kern/src/resource.c b/kern/src/resource.c
index e27196c..fdbffe6 100644
--- a/kern/src/resource.c
+++ b/kern/src/resource.c
@@ -57,7 +57,7 @@ ssize_t core_request(struct proc *p)
                disable_irqsave(&state);        /* protect cur_tf */
                assert(current_tf);
                p->env_tf = *current_tf;
-               current_tf = 0;                 /* Make sure it isn't used in the future */
+               clear_owning_proc(core_id());   /* so we don't restart */
                enable_irqsave(&state);
                env_push_ancillary_state(p); // TODO: (HSS)
                /* sending death, since it's not our job to save contexts or anything in
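
Where the old code merely zeroed current_tf, the new call also forgets which proc owns the core. A minimal sketch of what clear_owning_proc() is assumed to do here; the per-cpu layout and field names are illustrative, not Akaros's actual definitions:

    #include <stdint.h>

    struct proc;                            /* opaque here */
    struct trapframe;                       /* arch-specific register state */

    struct per_cpu_info {                   /* illustrative layout */
            struct proc *owning_proc;       /* proc this core runs on behalf of */
            struct trapframe *cur_tf;       /* user context to restart, if any */
    };

    extern struct per_cpu_info per_cpu_info[];

    /* Drop this core's record of its owning proc and its trapframe pointer,
     * so the idle/restart paths won't pop that context again. */
    static void clear_owning_proc(uint32_t coreid)
    {
            struct per_cpu_info *pcpui = &per_cpu_info[coreid];
            pcpui->owning_proc = 0;
            pcpui->cur_tf = 0;
    }

Either way, the point of the "so we don't restart" comment is that cur_tf must not be popped again now that the context lives in p->env_tf.
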
@@ -124,14 +124,20 @@ ssize_t core_request(struct proc *p)
                                 * state. */
                                struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[0];
                                disable_irqsave(&state);        /* protect cur_tf */
+                               /* Note this won't play well with concurrent proc kmsgs, but
+                                * since we're _S and locked, we shouldn't have any. */
                                assert(current_tf);
-                               vcpd->preempt_tf = *current_tf;
-                               enable_irqsave(&state);
+                               /* Copy uthread0's context to the notif slot */
+                               vcpd->notif_tf = *current_tf;
+                               clear_owning_proc(core_id());   /* so we don't restart */
                                save_fp_state(&vcpd->preempt_anc);
-                               __seq_start_write(&vcpd->preempt_tf_valid);
-                               /* If we remove this, vcore0 will start where the _S left off */
-                               vcpd->notif_pending = TRUE;
-                               assert(vcpd->notif_enabled);
+                               enable_irqsave(&state);
+                               /* Userspace must not fuck with notif_disabled before
+                                * transitioning to _M. */
+                               if (vcpd->notif_disabled) {
+                                       printk("[kernel] user bug: notifs disabled for vcore 0\n");
+                                       vcpd->notif_disabled = FALSE;
+                               }
                                /* in the async case, we'll need to remotely stop and bundle
                                 * vcore0's TF.  this is already done for the sync case (local
                                 * syscall). */
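
Switching from preempt_tf plus preempt_tf_valid to notif_tf changes how vcore 0 resumes: rather than the kernel restarting the _S context directly, vcore 0 appears to start as if a notification had fired, with uthread0's context parked in the notif slot. A hedged userspace-side sketch of that hand-off; vcpd_of() and pop_user_context() are hypothetical stand-ins, not parlib's actual API:

    struct user_context { long gp_regs[32]; };      /* stand-in for a trapframe */

    struct preempt_data_sketch {                    /* illustrative slice of vcpd */
            struct user_context notif_tf;           /* uthread0's saved context */
            int notif_disabled;                     /* must be FALSE at the switch */
    };

    /* hypothetical helpers, not parlib's actual API */
    extern struct preempt_data_sketch *vcpd_of(int vcoreid);
    extern void pop_user_context(struct user_context *ctx); /* does not return */

    /* After the _S -> _M switch, vcore 0 comes up at its entry point and can
     * resume uthread0 exactly where core_request() saved it. */
    void vcore0_entry_sketch(void)
    {
            struct preempt_data_sketch *vcpd = vcpd_of(0);
            /* 2LS bookkeeping for the transition would go here, then: */
            pop_user_context(&vcpd->notif_tf);
    }

This reading also explains the notif_disabled check above: the transition relies on vcore 0 looking notification-ready, hence the printk and the forced reset to FALSE.
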
@@ -146,7 +152,7 @@ ssize_t core_request(struct proc *p)
                                need_to_idle = TRUE;
                                // change to runnable_m (its TF is already saved)
                                __proc_set_state(p, PROC_RUNNABLE_M);
-                               p->is_mcp = TRUE;
+                               p->procinfo->is_mcp = TRUE;
                                break;
                        case (PROC_RUNNABLE_S):
                                /* Issues: being on the runnable_list, proc_set_state not liking
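
Moving is_mcp from struct proc into procinfo is presumably about visibility, since procinfo is mapped into the process's address space. A sketch under that assumption; the struct and global names below are illustrative, not parlib's actual definitions:

    #include <stdbool.h>

    struct procinfo_sketch {                /* illustrative slice of procinfo */
            bool is_mcp;                    /* TRUE once the proc is an MCP */
    };

    /* assume the kernel maps procinfo at a known user address; this name is
     * hypothetical */
    extern struct procinfo_sketch *__procinfo_sketch;

    /* Userspace can now answer "am I in _M mode?" without a syscall. */
    static inline bool in_multi_mode(void)
    {
            return __procinfo_sketch->is_mcp;
    }
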
@@ -164,18 +170,11 @@ ssize_t core_request(struct proc *p)
                __proc_give_cores(p, corelist, num_granted);
                spin_unlock(&p->proc_lock);
                /* if there's a race on state (like DEATH), it'll get handled by
-                * proc_run or proc_destroy */
+                * proc_run or proc_destroy.  TODO: Theoretical race here, since someone
+                * else could make p an _S, and then we would be calling this with an
+                * inedible ref (which is currently a concern). */
                if (p->state == PROC_RUNNABLE_M)
                        proc_run(p);    /* I dislike this - caller should run it */
-               /* if we are moving to a partitionable core from a RUNNING_S on a
-                * management core, the kernel needs to do something else on this core
-                * (just like in proc_destroy).  it also needs to decref, to consume the
-                * reference that came into this function (since we don't return).  */
-               if (need_to_idle) {
-                       proc_decref(p);
-                       abandon_core();
-                       smp_idle();
-               }
        } else { // nothing granted, just return
                spin_unlock(&p->proc_lock);
        }
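
The removed need_to_idle block decref'd and abandoned the core by hand. Since clear_owning_proc() already dropped cur_tf earlier in this function, the calling core can presumably fall through to a generic idle path that notices there is nothing to restart. A sketch of that idle-side check, with illustrative names, reusing the per_cpu_info sketch from the first annotation:

    extern struct per_cpu_info per_cpu_info[];
    extern uint32_t core_id(void);
    extern void schedule(void);                     /* pick new work for this core */
    extern void pop_user_tf(struct trapframe *tf);  /* hypothetical restart path */

    void smp_idle_sketch(void)
    {
            struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
            if (!pcpui->cur_tf)
                    schedule();                     /* nothing owned: find other work */
            else
                    pop_user_tf(pcpui->cur_tf);     /* resume the owned user context */
    }
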