Removes __proc_set_allcores()
diff --git a/kern/src/resource.c b/kern/src/resource.c
index 52d5ed5..fdbffe6 100644
--- a/kern/src/resource.c
+++ b/kern/src/resource.c
@@ -41,31 +41,32 @@ ssize_t core_request(struct proc *p)
        ssize_t amt_new;
        uint32_t corelist[MAX_NUM_CPUS];
        bool need_to_idle = FALSE;
-       bool self_ipi_pending = FALSE;
+       int8_t state = 0;
 
-       /* There are a few things broken for now if you don't have a current_tf */
-       assert(current_tf);
        spin_lock(&p->proc_lock);
        if (p->state == PROC_DYING) {
-               return -EFAIL;
                spin_unlock(&p->proc_lock);
+               return -EFAIL;
        }
        /* check to see if this is a full deallocation.  for cores, it's a
         * transition from _M to _S.  Will be issues with handling this async. */
        if (!p->resources[RES_CORES].amt_wanted) {
+               printk("[kernel] trying to transition _M -> _S (deprecated)!\n");
                assert(p->state == PROC_RUNNING_M); // TODO: (ACR) async core req
                /* save the context, to be restarted in _S mode */
+               disable_irqsave(&state);        /* protect cur_tf */
+               assert(current_tf);
                p->env_tf = *current_tf;
-               current_tf = 0;                 /* Make sure it isn't used in the future */
+               clear_owning_proc(core_id());   /* so we don't restart */
+               enable_irqsave(&state);
                env_push_ancillary_state(p); // TODO: (HSS)
                /* sending death, since it's not our job to save contexts or anything in
                 * this case.  also, if this returns true, we will not return down
                 * below, and need to eat the reference to p */
-               self_ipi_pending = __proc_take_allcores(p, __death, 0, 0, 0);
+               __proc_take_allcores(p, __death, 0, 0, 0);
                __proc_set_state(p, PROC_RUNNABLE_S);
                schedule_proc(p);
                spin_unlock(&p->proc_lock);
-               __proc_kmsg_pending(p, self_ipi_pending);
                return 0;
        }
        /* Fail if we can never handle this amount (based on how many we told the
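The hunk above replaces the self-IPI bookkeeping with an IRQ-off window around the cur_tf save for the _M -> _S transition. Below is a minimal, self-contained sketch of that ordering only; the types and helpers (trapframe, disable_irqsave, clear_owning_proc, current_tf) are simplified stand-ins for the real Akaros definitions, and save_context_to_proc() is a hypothetical wrapper, not a function in the tree.

#include <stdint.h>
#include <stdio.h>

struct trapframe { uintptr_t pc, sp; };
struct proc { struct trapframe env_tf; };

static struct trapframe *current_tf;    /* per-core, points at the interrupted ctx */
static struct proc *owning_proc;        /* per-core owning process, if any */

static void disable_irqsave(int8_t *state) { (*state)++; /* stub: cli + nesting */ }
static void enable_irqsave(int8_t *state)  { (*state)--; /* stub: sti at depth 0 */ }
static void clear_owning_proc(void)        { owning_proc = 0; current_tf = 0; }

/* Save the interrupted context into the proc, then make sure this core will
 * not restart it: the same ordering as the _M -> _S path in the hunk above. */
static void save_context_to_proc(struct proc *p)
{
	int8_t state = 0;
	disable_irqsave(&state);        /* cur_tf must not change under us */
	p->env_tf = *current_tf;        /* to be restarted later in _S mode */
	clear_owning_proc();            /* so this core doesn't pop the stale ctx */
	enable_irqsave(&state);
}

int main(void)
{
	struct trapframe tf = { .pc = 0x1000, .sp = 0x2000 };
	struct proc p;
	current_tf = &tf;
	save_context_to_proc(&p);
	printf("saved pc %lx, cur_tf now %p\n", (unsigned long)p.env_tf.pc,
	       (void*)current_tf);
	return 0;
}

The point of the ordering is that clearing the owning proc before interrupts come back on is what keeps this core from restarting the stale context once the proc has gone _S.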
@@ -122,12 +123,21 @@ ssize_t core_request(struct proc *p)
                                 * it in the preempt slot so that we can also save the silly
                                 * state. */
                                struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[0];
-                               vcpd->preempt_tf = *current_tf;
+                               disable_irqsave(&state);        /* protect cur_tf */
+                               /* Note this won't play well with concurrent proc kmsgs, but
+                                * since we're _S and locked, we shouldn't have any. */
+                               assert(current_tf);
+                               /* Copy uthread0's context to the notif slot */
+                               vcpd->notif_tf = *current_tf;
+                               clear_owning_proc(core_id());   /* so we don't restart */
                                save_fp_state(&vcpd->preempt_anc);
-                               __seq_start_write(&vcpd->preempt_tf_valid);
-                               /* If we remove this, vcore0 will start where the _S left off */
-                               vcpd->notif_pending = TRUE;
-                               assert(vcpd->notif_enabled);
+                               enable_irqsave(&state);
+                               /* Userspace must not mess with notif_disabled before
+                                * transitioning to _M. */
+                               if (vcpd->notif_disabled) {
+                                       printk("[kernel] user bug: notifs disabled for vcore 0\n");
+                                       vcpd->notif_disabled = FALSE;
+                               }
                                /* in the async case, we'll need to remotely stop and bundle
                                 * vcore0's TF.  this is already done for the sync case (local
                                 * syscall). */
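For the _S -> _M path, the hunk above now stashes uthread0's context in vcore 0's notification slot (notif_tf) rather than the preempt slot, and insists notifications stay enabled. Here is a rough sketch of just that step; the struct and field names mirror the diff (preempt_data, notif_tf, notif_disabled) but are stubbed rather than the real procdata layout, and stash_scp_context() is a hypothetical helper for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct trapframe { uintptr_t pc, sp; };
struct preempt_data {
	struct trapframe notif_tf;      /* where vcore 0 will start running from */
	bool notif_disabled;
};

static struct trapframe *current_tf;    /* stub for the per-core cur_tf */

/* Stash the _S context so vcore 0 picks it up via the notification path.
 * IRQs are assumed to already be off around the cur_tf copy, as in the hunk. */
static void stash_scp_context(struct preempt_data *vcpd)
{
	vcpd->notif_tf = *current_tf;   /* uthread0 resumes via vcore 0's entry */
	/* Userspace must leave notifs enabled on vcore 0 before going _M */
	if (vcpd->notif_disabled) {
		printf("[kernel] user bug: notifs disabled for vcore 0\n");
		vcpd->notif_disabled = false;
	}
}

int main(void)
{
	struct trapframe tf = { .pc = 0x1000, .sp = 0x2000 };
	struct preempt_data vcpd = { .notif_disabled = true };
	current_tf = &tf;
	stash_scp_context(&vcpd);
	return 0;
}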
@@ -136,12 +146,13 @@ ssize_t core_request(struct proc *p)
                                __seq_start_write(&p->procinfo->coremap_seqctr);
                                // TODO: (VC#) might need to adjust num_vcores
                                // TODO: (ACR) will need to unmap remotely (receive-side)
-                               __unmap_vcore(p, 0);
+                               __unmap_vcore(p, 0);    /* VC# keep in sync with proc_run _S */
                                __seq_end_write(&p->procinfo->coremap_seqctr);
                                // will need to give up this core / idle later (sync)
                                need_to_idle = TRUE;
                                // change to runnable_m (its TF is already saved)
                                __proc_set_state(p, PROC_RUNNABLE_M);
+                               p->procinfo->is_mcp = TRUE;
                                break;
                        case (PROC_RUNNABLE_S):
                                /* Issues: being on the runnable_list, proc_set_state not liking
@@ -156,23 +167,14 @@ ssize_t core_request(struct proc *p)
                                break;
                }
                /* give them the cores.  this will start up the extras if RUNNING_M. */
-               self_ipi_pending = __proc_give_cores(p, corelist, num_granted);
+               __proc_give_cores(p, corelist, num_granted);
                spin_unlock(&p->proc_lock);
-               // TODO: (RMS) think about this, esp when its called from a scheduler
-               __proc_kmsg_pending(p, self_ipi_pending);
                /* if there's a race on state (like DEATH), it'll get handled by
-                * proc_run or proc_destroy */
+                * proc_run or proc_destroy.  TODO: Theoretical race here, since someone
+                * else could make p an _S (in theory), and then we would be calling
+                * this with an inedible ref (which is currently a concern). */
                if (p->state == PROC_RUNNABLE_M)
-                       proc_run(p);
-               /* if we are moving to a partitionable core from a RUNNING_S on a
-                * management core, the kernel needs to do something else on this core
-                * (just like in proc_destroy).  it also needs to decref, to consume the
-                * reference that came into this function (since we don't return).  */
-               if (need_to_idle) {
-                       kref_put(&p->kref);
-                       abandon_core();
-                       smp_idle();
-               }
+                       proc_run(p);    /* I dislike this - caller should run it */
        } else { // nothing granted, just return
                spin_unlock(&p->proc_lock);
        }
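This hunk drops the __proc_kmsg_pending()/need_to_idle handling and leaves a grant-then-run ordering: cores are handed out under the proc_lock, the lock is dropped, and the proc is run only if it is still RUNNABLE_M. The sketch below models that ordering with stub types and helpers (not the real Akaros spinlock or proc API; grant_cores() is a hypothetical wrapper) so the race window called out in the TODO is easy to see.

#include <stddef.h>

enum proc_state { PROC_RUNNABLE_M, PROC_RUNNING_M };
struct spinlock { int locked; };
struct proc { struct spinlock lock; enum proc_state state; };

static void spin_lock(struct spinlock *l)   { l->locked = 1; /* stub */ }
static void spin_unlock(struct spinlock *l) { l->locked = 0; /* stub */ }
static void __proc_give_cores(struct proc *p, unsigned *cores, size_t n)
{
	(void)p; (void)cores; (void)n;  /* stub: would map and start vcores */
}
static void proc_run(struct proc *p) { p->state = PROC_RUNNING_M; }

/* Grant under the lock, drop it, then run: the window between spin_unlock()
 * and the state check is the "theoretical race" the TODO above refers to. */
static void grant_cores(struct proc *p, unsigned *corelist, size_t num_granted)
{
	spin_lock(&p->lock);
	__proc_give_cores(p, corelist, num_granted);  /* starts extras if RUNNING_M */
	spin_unlock(&p->lock);
	if (p->state == PROC_RUNNABLE_M)
		proc_run(p);            /* the diff would rather the caller do this */
}

int main(void)
{
	struct proc p = { .state = PROC_RUNNABLE_M };
	unsigned corelist[2] = { 3, 4 };
	grant_cores(&p, corelist, 2);
	return 0;
}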
@@ -235,12 +237,12 @@ void print_resources(struct proc *p)
 
 void print_all_resources(void)
 {
-       spin_lock(&pid_hash_lock);
-       if (hashtable_count(pid_hash)) {
-               hashtable_itr_t *phtable_i = hashtable_iterator(pid_hash);
-               do {
-                       print_resources(hashtable_iterator_value(phtable_i));
-               } while (hashtable_iterator_advance(phtable_i));
+       /* Hash helper */
+       void __print_resources(void *item)
+       {
+               print_resources((struct proc*)item);
        }
+       spin_lock(&pid_hash_lock);
+       hash_for_each(pid_hash, __print_resources);
        spin_unlock(&pid_hash_lock);
 }
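The last hunk swaps the explicit hashtable iterator for a callback-based walk. Below is a standalone sketch of that pattern: a plain array stands in for pid_hash, a hypothetical my_for_each() stands in for hash_for_each(), and __print_one() plays the role of __print_resources(). The nested function relies on the GCC nested-function extension, which the kernel code above evidently already uses.

#include <stdio.h>

struct proc { int pid; };

/* Visit every non-NULL entry, handing it to the callback as a void*. */
static void my_for_each(struct proc **table, int n, void (*func)(void *item))
{
	for (int i = 0; i < n; i++)
		if (table[i])
			func(table[i]);
}

static void print_all(struct proc **table, int n)
{
	/* GCC nested function: adapts the void* callback to the typed printer,
	 * keeping the adapter right next to its only caller. */
	void __print_one(void *item)
	{
		printf("proc %d\n", ((struct proc*)item)->pid);
	}
	my_for_each(table, n, __print_one);
}

int main(void)
{
	struct proc a = { .pid = 1 }, b = { .pid = 2 };
	struct proc *table[] = { &a, &b, 0 };
	print_all(table, 3);
	return 0;
}

Keeping the void* adapter defined inside print_all() is presumably why the diff declares __print_resources() inside print_all_resources(): the cast lives in one place and nothing else can call it.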