[akaros.git] / kern / src / resource.c
index 2024518..266f80c 100644
@@ -12,6 +12,7 @@
 
 #include <resource.h>
 #include <process.h>
+#include <smp.h>
 #include <stdio.h>
 #include <assert.h>
 #include <schedule.h>
@@ -42,33 +43,40 @@ ssize_t core_request(struct proc *p)
        bool need_to_idle = FALSE;
        bool self_ipi_pending = FALSE;
 
-       spin_lock_irqsave(&p->proc_lock);
+       spin_lock(&p->proc_lock);
        /* check to see if this is a full deallocation.  for cores, it's a
         * transition from _M to _S.  There will be issues handling this async. */
        if (!p->resources[RES_CORES].amt_wanted) {
                assert(p->state == PROC_RUNNING_M); // TODO: (ACR) async core req
                // save the context, to be restarted in _S mode
                p->env_tf = *current_tf;
-               env_push_ancillary_state(p);
-               proc_set_syscall_retval(&p->env_tf, ESUCCESS);
+               env_push_ancillary_state(p); // TODO: (HSS)
+               set_retval(ESUCCESS);
                /* sending death, since it's not our job to save contexts or anything in
                 * this case.  also, if this returns true, we will not return down
                 * below, and need to eat the reference to p */
                self_ipi_pending = __proc_take_allcores(p, __death, 0, 0, 0);
                __proc_set_state(p, PROC_RUNNABLE_S);
                schedule_proc(p);
-               __proc_unlock_ipi_pending(p, self_ipi_pending);
+               spin_unlock(&p->proc_lock);
+               __proc_kmsg_pending(p, self_ipi_pending);
                return 0;
        }
+       /* Fail if we can never handle this amount (based on how many we told the
+        * process it can get). */
+       if (p->resources[RES_CORES].amt_wanted > p->procinfo->max_vcores) {
+               spin_unlock(&p->proc_lock);
+               return -EFAIL;
+       }
        /* otherwise, see how many new cores are wanted */
        amt_new = p->resources[RES_CORES].amt_wanted -
                  p->resources[RES_CORES].amt_granted;
        if (amt_new < 0) {
                p->resources[RES_CORES].amt_wanted = p->resources[RES_CORES].amt_granted;
-               spin_unlock_irqsave(&p->proc_lock);
+               spin_unlock(&p->proc_lock);
                return -EINVAL;
        } else if (amt_new == 0) {
-               spin_unlock_irqsave(&p->proc_lock);
+               spin_unlock(&p->proc_lock);
                return 0;
        }
        // else, we try to handle the request
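
Two idioms introduced in this hunk recur through the rest of the diff. First, the proc_lock accessors drop their _irqsave variants, presumably because the lock is no longer taken from interrupt context. Second, the old combined helper __proc_unlock_ipi_pending() is split into an explicit spin_unlock() followed by __proc_kmsg_pending(). A minimal sketch of the resulting discipline, with a hypothetical body for __proc_kmsg_pending() (the real one lives in process.c and may differ):

        /* Hypothetical sketch: drain a self-targeted kernel message only after
         * the proc_lock is dropped, so the handler can't deadlock on the lock. */
        void __proc_kmsg_pending(struct proc *p, bool pending)
        {
                if (pending)
                        process_routine_kmsg(); /* assumed drain of this core's routine kmsgs */
        }

        /* Call-site ordering, as used throughout this diff: */
        self_ipi_pending = __proc_take_allcores(p, __death, 0, 0, 0);
        spin_unlock(&p->proc_lock);               /* unlock first... */
        __proc_kmsg_pending(p, self_ipi_pending); /* ...then handle any self-kmsg */
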
@@ -87,9 +95,11 @@ ssize_t core_request(struct proc *p)
                }
                num_granted = amt_new;
        } else {
+               /* In this case, you might want to preempt or do other fun things... */
                num_granted = 0;
        }
        spin_unlock(&idle_lock);
+
        // Now, actually give them out
        if (num_granted) {
                switch (p->state) {
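
For context, cores are handed out from a global idle-core map guarded by idle_lock; the hunk above sits in the middle of that allocation step. A condensed reconstruction (idlecoremap and num_idlecores match the naming used elsewhere in this file, but treat the exact shape as an assumption):

        spin_lock(&idle_lock);
        if (num_idlecores >= amt_new) {
                /* pop cores off the back of the idle map, LIFO style */
                for (int i = 0; i < amt_new; i++)
                        corelist[i] = idlecoremap[--num_idlecores];
                num_granted = amt_new;
        } else {
                /* nothing to grant; preemption of other procs could go here */
                num_granted = 0;
        }
        spin_unlock(&idle_lock);
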
@@ -97,6 +107,8 @@ ssize_t core_request(struct proc *p)
                                // issue with whether we're async or not (need to preempt it)
                                // either of these checks should trip it. TODO: (ACR) async core req
                                // TODO: relies on vcore0 being the caller (VC#)
+                               // TODO: do this in process.c and use this line:
+                               //if ((current != p) || (get_pcoreid(p, 0) != core_id()))
                                if ((current != p) || (p->procinfo->vcoremap[0].pcoreid != core_id()))
                                        panic("We don't handle async RUNNING_S core requests yet.");
                                /* save the tf so userspace can restart it.  Like in __notify,
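
The commented-out line above sketches the helper the TODO asks for: get_pcoreid(p, 0) instead of poking the vcoremap directly. Going by the equivalence the diff itself shows, it would look roughly like this (hypothetical until it actually lands in process.c):

        /* Hypothetical accessor per the TODO: translate a vcoreid into the
         * physical core it is currently mapped to. */
        static inline uint32_t get_pcoreid(struct proc *p, uint32_t vcoreid)
        {
                return p->procinfo->vcoremap[vcoreid].pcoreid;
        }
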
@@ -109,6 +121,7 @@ ssize_t core_request(struct proc *p)
                                __seq_start_write(&vcpd->preempt_tf_valid);
                                /* If we remove this, vcore0 will start where the _S left off */
                                vcpd->notif_pending = TRUE;
+                               assert(vcpd->notif_enabled);
                                /* in the async case, we'll need to remotely stop and bundle
                                 * vcore0's TF.  this is already done for the sync case (local
                                 * syscall). */
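
The __seq_start_write() on preempt_tf_valid is the publish half of a seqlock-style handoff: the kernel flips the counter so userspace can tell that the saved trapframe is valid and detect a torn read. A hedged sketch of the matching consumer loop (seqctr_retry() and cmb() are illustrative assumptions, not necessarily Akaros's exact read-side API):

        seq_ctr_t seq;
        struct user_trapframe local_tf;
        do {
                seq = vcpd->preempt_tf_valid;   /* snapshot the counter */
                cmb();                          /* compiler barrier before the copy */
                local_tf = vcpd->preempt_tf;    /* copy out the published frame */
                cmb();                          /* barrier before re-checking */
        } while (seqctr_retry(seq, vcpd->preempt_tf_valid)); /* retry on torn read */
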
@@ -136,7 +149,9 @@ ssize_t core_request(struct proc *p)
                }
                /* give them the cores.  this will start up the extras if RUNNING_M. */
                self_ipi_pending = __proc_give_cores(p, corelist, num_granted);
-               __proc_unlock_ipi_pending(p, self_ipi_pending);
+               spin_unlock(&p->proc_lock);
+               // TODO: (RMS) think about this, esp. when it's called from a scheduler
+               __proc_kmsg_pending(p, self_ipi_pending);
                /* if there's a race on state (like DEATH), it'll get handled by
                 * proc_run or proc_destroy */
                if (p->state == PROC_RUNNABLE_M)
@@ -146,11 +161,11 @@ ssize_t core_request(struct proc *p)
                 * (just like in proc_destroy).  it also needs to decref, to consume the
                 * reference that came into this function (since we don't return).  */
                if (need_to_idle) {
-                       proc_decref(p, 1);
+                       kref_put(&p->kref);
                        abandon_core();
                }
        } else { // nothing granted, just return
-               spin_unlock_irqsave(&p->proc_lock);
+               spin_unlock(&p->proc_lock);
        }
        return num_granted;
 }
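
proc_decref(p, 1) becoming kref_put(&p->kref) reflects process refcounting moving onto a generic kref embedded in struct proc. A minimal sketch of that discipline (using __proc_free as the release callback is an assumption based on the surrounding code):

        kref_init(&p->kref, __proc_free, 1);    /* assumed: one initial ref for the creator */
        kref_get(&p->kref, 1);                  /* take a ref before handing p around */
        /* ... use p ... */
        kref_put(&p->kref);                     /* drop it; the last put runs the release */
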
@@ -166,23 +181,19 @@ error_t resource_req(struct proc *p, int type, size_t amt_wanted,
                printk("[kernel] Async requests treated synchronously for now.\n");
 
        /* set the desired resource amount in the process's resource list. */
-       spin_lock_irqsave(&p->proc_lock);
+       spin_lock(&p->proc_lock);
        size_t old_amount = p->resources[type].amt_wanted;
        p->resources[type].amt_wanted = amt_wanted;
        p->resources[type].amt_wanted_min = MIN(amt_wanted_min, amt_wanted);
        p->resources[type].flags = flags;
-       spin_unlock_irqsave(&p->proc_lock);
-
-       // no change in the amt_wanted
-       if (old_amount == amt_wanted)
-               return 0;
+       spin_unlock(&p->proc_lock);
 
        switch (type) {
                case RES_CORES:
                        retval = core_request(p);
                        // I don't like this retval hackery
                        if (retval < 0) {
-                               set_errno(current_tf, -retval);
+                               set_errno(-retval);
                                return -1;
                        }
                        else
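
Finally, set_errno() (like set_retval() earlier in the diff) loses its trapframe argument: with <smp.h> pulled in, these helpers can find the current per-core context themselves instead of having every call site thread current_tf through. The caller-side convention, sketched against the code above:

        retval = core_request(p);
        if (retval < 0) {
                set_errno(-retval);     /* e.g. EFAIL from the new max_vcores check */
                return -1;              /* syscall-level failure indicator */
        }
        return retval;                  /* otherwise, the number of cores granted */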