#include <sys/queue.h>
#include <frontend.h>
#include <monitor.h>
-#include <resource.h>
#include <elf.h>
#include <arsc_server.h>
#include <devfs.h>
-/* Process Lists */
-struct proc_list proc_runnablelist = TAILQ_HEAD_INITIALIZER(proc_runnablelist);
-spinlock_t runnablelist_lock = SPINLOCK_INITIALIZER;
struct kmem_cache *proc_cache;
/* Other helpers, implemented later. */
static uint32_t try_get_pcoreid(struct proc *p, uint32_t vcoreid);
static uint32_t get_pcoreid(struct proc *p, uint32_t vcoreid);
static void __proc_free(struct kref *kref);
+static bool scp_is_vcctx_ready(struct preempt_data *vcpd);
/* PID management. */
#define PID_MAX 32767 // goes from 0 to 32767, with 0 reserved
/* 0'ing the arguments. Some higher function will need to set them */
memset(p->procinfo->argp, 0, sizeof(p->procinfo->argp));
memset(p->procinfo->argbuf, 0, sizeof(p->procinfo->argbuf));
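+	/* res_grant (in procinfo) takes over the granted amounts from the old
+	 * p->resources; wanted amounts now live in procdata->res_req (see
+	 * __proc_give_cores) */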
+ memset(p->procinfo->res_grant, 0, sizeof(p->procinfo->res_grant));
/* 0'ing the vcore/pcore map. Will link the vcores later. */
memset(&p->procinfo->vcoremap, 0, sizeof(p->procinfo->vcoremap));
memset(&p->procinfo->pcoremap, 0, sizeof(p->procinfo->pcoremap));
static void proc_init_procdata(struct proc *p)
{
memset(p->procdata, 0, sizeof(struct procdata));
+ /* processes can't go into vc context on vc 0 til they unset this. This is
+ * for processes that block before initing uthread code (like rtld). */
+ atomic_set(&p->procdata->vcore_preempt_data[0].flags, VC_SCP_NOVCCTX);
}
/* Allocates and initializes a process, with the given parent. Currently
{ INITSTRUCT(*p)
- /* one reference for the proc existing, and one for the ref we pass back. */
- kref_init(&p->p_kref, __proc_free, 2);
+ /* only one ref, which we pass back. the old 'existence' ref is managed by
+ * the ksched */
+ kref_init(&p->p_kref, __proc_free, 1);
// Setup the default map of where to get cache colors from
p->cache_colors_map = global_cache_colors_map;
p->next_cache_color = 0;
/* Set the basic status variables. */
spinlock_init(&p->proc_lock);
p->exitcode = 1337; /* so we can see processes killed by the kernel */
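+	/* signaled by __proc_destroy (via __up_sem) when the proc starts dying;
+	 * assumes a single waiter, per the comment there */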
+ init_sem(&p->state_change, 0);
p->ppid = parent ? parent->pid : 0;
p->state = PROC_CREATED; /* shouldn't go through state machine for init */
p->env_flags = 0;
p->env_entry = 0; // cheating. this really gets set later
p->heap_top = (void*)UTEXT; /* heap_bottom set in proc_init_procinfo */
- memset(&p->resources, 0, sizeof(p->resources));
memset(&p->env_ancillary_state, 0, sizeof(p->env_ancillary_state));
memset(&p->env_tf, 0, sizeof(p->env_tf));
spinlock_init(&p->mm_lock);
* push setting the state to CREATED into here. */
void __proc_ready(struct proc *p)
{
+ /* Tell the ksched about us */
+ register_proc(p);
spin_lock(&pid_hash_lock);
hashtable_insert(pid_hash, (void*)(long)p->pid, p);
spin_unlock(&pid_hash_lock);
}
}
-/* Dispatches a process to run, either on the current core in the case of a
- * RUNNABLE_S, or on its partition in the case of a RUNNABLE_M. This should
- * never be called to "restart" a core. This expects that the "instructions"
- * for which core(s) to run this on will be in the vcoremap, which needs to be
- * set externally.
+/* Returns TRUE if the SCP's vcore context is ready. The VC_SCP_NOVCCTX flag is
+ * set in proc_init_procdata; the process must turn it off on vcore0 at some
+ * point. The flag is off by default on all other vcores. */
+static bool scp_is_vcctx_ready(struct preempt_data *vcpd)
+{
+ return !(atomic_read(&vcpd->flags) & VC_SCP_NOVCCTX);
+}
+
+/* Dispatches a _S process to run on the current core. This should never be
+ * called to "restart" a core.
*
- * When a process goes from RUNNABLE_M to RUNNING_M, its vcoremap will be
- * "packed" (no holes in the vcore->pcore mapping), vcore0 will continue to run
- * it's old core0 context, and the other cores will come in at the entry point.
- * Including in the case of preemption.
+ * This will always return, regardless of whether or not the calling core is
+ * being given to a process. (it used to pop the tf directly, before we had
+ * cur_tf).
*
- * This won't return if the current core is going to be running the process as a
- * _S. It will return if the process is an _M. Regardless, proc_run will eat
- * your reference if it does not return. */
-void proc_run(struct proc *p)
+ * Since it always returns, it will never "eat" your reference (old
+ * documentation talks about this a bit). */
+void proc_run_s(struct proc *p)
{
- struct vcore *vc_i;
+ int8_t state = 0;
+ uint32_t coreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+ struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[0];
spin_lock(&p->proc_lock);
-
switch (p->state) {
case (PROC_DYING):
spin_unlock(&p->proc_lock);
- printk("Process %d not starting due to async death\n", p->pid);
- // if we're a worker core, smp_idle, o/w return
- if (!management_core())
- smp_idle(); // this never returns
+ printk("[kernel] _S %d not starting due to async death\n", p->pid);
return;
case (PROC_RUNNABLE_S):
- assert(current != p);
__proc_set_state(p, PROC_RUNNING_S);
/* We will want to know where this process is running, even if it is
* only in RUNNING_S. can use the vcoremap, which makes death easy.
/* TODO: For now, we won't count this as an active vcore (on the
* lists). This gets unmapped in resource.c and yield_s, and needs
* work. */
- __map_vcore(p, 0, core_id()); // sort of. this needs work.
+ __map_vcore(p, 0, coreid); /* not treated like a true vcore */
__seq_end_write(&p->procinfo->coremap_seqctr);
+ /* incref, since we're saving a reference in owning proc later */
+ proc_incref(p, 1);
+ /* disable interrupts to protect cur_tf, owning_proc, and current */
+ disable_irqsave(&state);
+ /* wait til ints are disabled before unlocking, in case someone else
+ * grabs the lock and IPIs us before we get set up in cur_tf */
+ spin_unlock(&p->proc_lock);
+			/* redundant with proc_startcore, might be able to remove that one */
__set_proc_current(p);
- /* We restartcore, instead of startcore, since startcore is a bit
- * lower level and we want a chance to process kmsgs before starting
- * the process. */
+ /* set us up as owning_proc. ksched bug if there is already one,
+ * for now. can simply clear_owning if we want to. */
+ assert(!pcpui->owning_proc);
+ pcpui->owning_proc = p;
+ pcpui->owning_vcoreid = 0; /* TODO (VC#) */
+ /* TODO: (HSS) set silly state here (__startcore does it instantly) */
+ /* similar to the old __startcore, start them in vcore context if
+ * they have notifs and aren't already in vcore context. o/w, start
+ * them wherever they were before (could be either vc ctx or not) */
+ if (!vcpd->notif_disabled && vcpd->notif_pending
+ && scp_is_vcctx_ready(vcpd)) {
+ vcpd->notif_disabled = TRUE;
+ /* save the _S's tf in the notify slot, build and pop a new one
+ * in actual/cur_tf. */
+ vcpd->notif_tf = p->env_tf;
+ pcpui->cur_tf = &pcpui->actual_tf;
+ memset(pcpui->cur_tf, 0, sizeof(struct trapframe));
+ proc_init_trapframe(pcpui->cur_tf, 0, p->env_entry,
+ vcpd->transition_stack);
+ } else {
+ /* If they have no transition stack, then they can't receive
+ * events. The most they are getting is a wakeup from the
+ * kernel. They won't even turn off notif_pending, so we'll do
+ * that for them. */
+ if (!scp_is_vcctx_ready(vcpd))
+ vcpd->notif_pending = FALSE;
+ /* this is one of the few times cur_tf != &actual_tf */
+ pcpui->cur_tf = &p->env_tf;
+ }
+ enable_irqsave(&state);
+ /* When the calling core idles, it'll call restartcore and run the
+ * _S process's context. */
+ return;
+ default:
spin_unlock(&p->proc_lock);
- disable_irq(); /* before mucking with cur_tf / owning_proc */
- /* this is one of the few times cur_tf != &actual_tf */
- current_tf = &p->env_tf; /* no need for irq disable yet */
- /* storing the passed in ref of p in owning_proc */
- per_cpu_info[core_id()].owning_proc = p;
- proc_restartcore(); /* will reenable interrupts */
- break;
+ panic("Invalid process state %p in %s()!!", p->state, __FUNCTION__);
+ }
+}
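+/* Sketch of expected usage (illustrative only): since proc_run_s() always
+ * returns, a ksched can call it and then finish its pass; the _S's context
+ * gets popped when this core next idles:
+ *
+ *     proc_run_s(p);
+ *     ...              // finish up ksched work
+ *     smp_idle();      // restartcore eventually pops pcpui->cur_tf
+ */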
+
+/* Helper: sends preempt messages to all vcores on the bulk preempt list, and
+ * moves them to the inactive list. */
+static void __send_bulkp_events(struct proc *p)
+{
+ struct vcore *vc_i, *vc_temp;
+ struct event_msg preempt_msg = {0};
+ /* Send preempt messages for any left on the BP list. No need to set any
+ * flags, it all was done on the real preempt. Now we're just telling the
+ * process about any that didn't get restarted and are still preempted. */
+ TAILQ_FOREACH_SAFE(vc_i, &p->bulk_preempted_vcs, list, vc_temp) {
+ /* Note that if there are no active vcores, send_k_e will post to our
+ * own vcore, the last of which will be put on the inactive list and be
+ * the first to be started. We could have issues with deadlocking,
+ * since send_k_e() could grab the proclock (if there are no active
+ * vcores) */
+ preempt_msg.ev_type = EV_VCORE_PREEMPT;
+ preempt_msg.ev_arg2 = vcore2vcoreid(p, vc_i); /* arg2 is 32 bits */
+ send_kernel_event(p, &preempt_msg, 0);
+ /* TODO: we may want a TAILQ_CONCAT_HEAD, or something that does that.
+ * We need a loop for the messages, but not necessarily for the list
+ * changes. */
+ TAILQ_REMOVE(&p->bulk_preempted_vcs, vc_i, list);
+ TAILQ_INSERT_HEAD(&p->inactive_vcs, vc_i, list);
+ }
+}
+
+/* Run an _M. Can be called safely on one that is already running. Hold the
+ * lock before calling. Other than state checks, this just starts up the _M's
+ * vcores, much like the second part of give_cores_running. More specifically,
+ * give_cores_runnable puts cores on the online list, which this then sends
+ * messages to. give_cores_running immediately puts them on the list and sends
+ * the message. the two-step style may go out of fashion soon.
+ *
+ * This expects that the "instructions" for which core(s) to run this on will be
+ * in the vcoremap, which needs to be set externally (give_cores()). */
+void __proc_run_m(struct proc *p)
+{
+ struct vcore *vc_i;
+ switch (p->state) {
+ case (PROC_WAITING):
+ case (PROC_DYING):
+ warn("ksched tried to run proc %d in state %s\n", p->pid,
+ procstate2str(p->state));
+ return;
case (PROC_RUNNABLE_M):
/* vcoremap[i] holds the coreid of the physical core allocated to
* this process. It is set outside proc_run. For the kernel
* message, a0 = struct proc*, a1 = struct trapframe*. */
if (p->procinfo->num_vcores) {
+ __send_bulkp_events(p);
__proc_set_state(p, PROC_RUNNING_M);
/* Up the refcnt, to avoid the n refcnt upping on the
* destination cores. Keep in sync with __startcore */
* turn online */
TAILQ_FOREACH(vc_i, &p->online_vcs, list) {
send_kernel_message(vc_i->pcoreid, __startcore, (long)p,
- 0, 0, KMSG_IMMEDIATE);
+ (long)vcore2vcoreid(p, vc_i), 0,
+ KMSG_IMMEDIATE);
}
} else {
warn("Tried to proc_run() an _M with no vcores!");
}
- /* Unlock and decref/wait for the IPI if one is pending. This will
- * eat the reference if we aren't returning.
- *
- * There a subtle race avoidance here. __proc_startcore can handle
- * a death message, but we can't have the startcore come after the
- * death message. Otherwise, it would look like a new process. So
- * we hold the lock til after we send our message, which prevents a
- * possible death message.
+			/* There is a subtle race avoidance here (when we unlock after sending
+ * the message). __proc_startcore can handle a death message, but
+ * we can't have the startcore come after the death message.
+ * Otherwise, it would look like a new process. So we hold the lock
+ * til after we send our message, which prevents a possible death
+ * message.
* - Note there is no guarantee this core's interrupts were on, so
* it may not get the message for a while... */
- spin_unlock(&p->proc_lock);
- break;
+ return;
+ case (PROC_RUNNING_M):
+ return;
default:
+			/* unlock just so the monitor can call something that might lock */
spin_unlock(&p->proc_lock);
- panic("Invalid process state %p in proc_run()!!", p->state);
+ panic("Invalid process state %p in %s()!!", p->state, __FUNCTION__);
}
}
{
struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
assert(!pcpui->cur_sysc);
+ /* TODO: can probably remove this enable_irq. it was an optimization for
+ * RKMs */
/* Try and get any interrupts before we pop back to userspace. If we didn't
* do this, we'd just get them in userspace, but this might save us some
* effort/overhead. */
__proc_startcore(pcpui->owning_proc, pcpui->cur_tf);
}
-/*
- * Destroys the given process. This may be called from another process, a light
- * kernel thread (no real process context), asynchronously/cross-core, or from
- * the process on its own core.
+/* Destroys the process. This should be called by the ksched, which needs to
+ * hold the lock. It will destroy the process and return any cores allocated to
+ * the proc via pc_arr and nr_revoked. It's up to the caller to have enough
+ * space for pc_arr. This will return TRUE if we successfully killed it, FALSE
+ * otherwise. Failure isn't a big deal either - it can happen due to concurrent
+ * calls to proc_destroy.
*
* Here's the way process death works:
* 0. grab the lock (protects state transition and core map)
* come in, making you abandon_core, as if you weren't running. It may be that
* the only reference to p is the one you passed in, and when you decref, it'll
* get __proc_free()d. */
-void proc_destroy(struct proc *p)
+bool __proc_destroy(struct proc *p, uint32_t *pc_arr, uint32_t *nr_revoked)
{
- spin_lock(&p->proc_lock);
+ struct kthread *sleeper;
switch (p->state) {
case PROC_DYING: // someone else killed this already.
- spin_unlock(&p->proc_lock);
- return;
+ return FALSE;
case PROC_RUNNABLE_M:
/* Need to reclaim any cores this proc might have, even though it's
* not running yet. */
- __proc_take_allcores_dumb(p, FALSE);
+ *nr_revoked = __proc_take_allcores(p, pc_arr, FALSE);
// fallthrough
case PROC_RUNNABLE_S:
- // Think about other lists, like WAITING, or better ways to do this
- deschedule_proc(p);
+ /* might need to pull from lists, though i'm currently a fan of the
+ * model where external refs notice DYING (if it matters to them)
+ * and decref when they are done. the ksched will notice the proc
+			 * is dying and handle it accordingly (which delays the reaping til
+ * the next call to schedule()) */
break;
case PROC_RUNNING_S:
#if 0
// TODO: might need to sort num_vcores too later (VC#)
/* vcore is unmapped on the receive side */
__seq_end_write(&p->procinfo->coremap_seqctr);
- #if 0
- /* right now, RUNNING_S only runs on a mgmt core (0), not cores
- * managed by the idlecoremap. so don't do this yet. */
- put_idle_core(get_pcoreid(p, 0));
- #endif
+ /* If we ever have RUNNING_S run on non-mgmt cores, we'll need to
+ * tell the ksched about this now-idle core (after unlocking) */
break;
case PROC_RUNNING_M:
/* Send the DEATH message to every core running this process, and
* deallocate the cores.
* The rule is that the vcoremap is set before proc_run, and reset
* within proc_destroy */
- __proc_take_allcores_dumb(p, FALSE);
+ *nr_revoked = __proc_take_allcores(p, pc_arr, FALSE);
break;
case PROC_CREATED:
break;
default:
- panic("Weird state(%s) in %s()", procstate2str(p->state),
- __FUNCTION__);
+ warn("Weird state(%s) in %s()", procstate2str(p->state),
+ __FUNCTION__);
+ return FALSE;
}
+ /* At this point, a death IPI should be on its way, either from the
+ * RUNNING_S one, or from proc_take_cores with a __death. in general,
+ * interrupts should be on when you call proc_destroy locally, but currently
+ * aren't for all things (like traphandlers). */
__proc_set_state(p, PROC_DYING);
/* This prevents processes from accessing their old files while dying, and
* will help if these files (or similar objects in the future) hold
* references to p (preventing a __proc_free()). */
close_all_files(&p->open_files, FALSE);
- /* This decref is for the process's existence. */
- proc_decref(p);
- /* Unlock. A death IPI should be on its way, either from the RUNNING_S one,
- * or from proc_take_cores with a __death. in general, interrupts should be
- * on when you call proc_destroy locally, but currently aren't for all
- * things (like traphandlers). */
- spin_unlock(&p->proc_lock);
- return;
+ /* Signal our state change. Assuming we only have one waiter right now. */
+ sleeper = __up_sem(&p->state_change, TRUE);
+ if (sleeper)
+ kthread_runnable(sleeper);
+ return TRUE;
+}
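+/* Sketch of expected ksched usage (illustrative only; pc_arr is sized by the
+ * caller, mirroring proc_preempt_all() below):
+ *
+ *     uint32_t nr_revoked = 0;
+ *     spin_lock(&p->proc_lock);
+ *     uint32_t pc_arr[p->procinfo->num_vcores];  // sized under the lock
+ *     if (__proc_destroy(p, pc_arr, &nr_revoked))
+ *         printd("killed %d\n", p->pid);
+ *     spin_unlock(&p->proc_lock);
+ *     if (nr_revoked)
+ *         put_idle_cores(p, pc_arr, nr_revoked);
+ */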
+
+/* Turns *p into an MCP. Needs to be called from a local syscall of a RUNNING_S
+ * process. Returns 0 if it succeeded, an error code otherwise. You should
+ * hold the lock before calling. */
+int __proc_change_to_m(struct proc *p)
+{
+ int8_t state = 0;
+ /* in case userspace erroneously tries to change more than once */
+ if (__proc_is_mcp(p))
+ return -EINVAL;
+ switch (p->state) {
+ case (PROC_RUNNING_S):
+			/* There's an issue if the request is async (we'd need to preempt the
+			 * remote core); either of these checks should trip it.
+			 * TODO: (ACR) async core req
+			 * TODO: relies on vcore0 being the caller (VC#) */
+ if ((current != p) || (get_pcoreid(p, 0) != core_id()))
+ panic("We don't handle async RUNNING_S core requests yet.");
+ /* save the tf so userspace can restart it. Like in __notify,
+ * this assumes a user tf is the same as a kernel tf. We save
+ * it in the preempt slot so that we can also save the silly
+ * state. */
+ struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[0];
+ disable_irqsave(&state); /* protect cur_tf */
+ /* Note this won't play well with concurrent proc kmsgs, but
+ * since we're _S and locked, we shouldn't have any. */
+ assert(current_tf);
+ /* Copy uthread0's context to the notif slot */
+ vcpd->notif_tf = *current_tf;
+ clear_owning_proc(core_id()); /* so we don't restart */
+ save_fp_state(&vcpd->preempt_anc);
+ enable_irqsave(&state);
+ /* Userspace needs to not fuck with notif_disabled before
+ * transitioning to _M. */
+ if (vcpd->notif_disabled) {
+ printk("[kernel] user bug: notifs disabled for vcore 0\n");
+ vcpd->notif_disabled = FALSE;
+ }
+ /* in the async case, we'll need to remotely stop and bundle
+ * vcore0's TF. this is already done for the sync case (local
+ * syscall). */
+ /* this process no longer runs on its old location (which is
+ * this core, for now, since we don't handle async calls) */
+ __seq_start_write(&p->procinfo->coremap_seqctr);
+ // TODO: (VC#) might need to adjust num_vcores
+ // TODO: (ACR) will need to unmap remotely (receive-side)
+ __unmap_vcore(p, 0); /* VC# keep in sync with proc_run_s */
+ __seq_end_write(&p->procinfo->coremap_seqctr);
+			/* change to runnable_m (its TF is already saved) */
+ __proc_set_state(p, PROC_RUNNABLE_M);
+ p->procinfo->is_mcp = TRUE;
+ break;
+ case (PROC_RUNNABLE_S):
+ /* Issues: being on the runnable_list, proc_set_state not liking
+ * it, and not clearly thinking through how this would happen.
+ * Perhaps an async call that gets serviced after you're
+ * descheduled? */
+ warn("Not supporting RUNNABLE_S -> RUNNABLE_M yet.\n");
+ return -EINVAL;
+ case (PROC_DYING):
+ warn("Dying, core request coming from %d\n", core_id());
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Old code to turn a RUNNING_M to a RUNNING_S, with the calling context
+ * becoming the new 'thread0'. Don't use this. Caller needs to send in a
+ * pc_arr big enough for all vcores. Will return the number of cores given up
+ * by the proc. */
+uint32_t __proc_change_to_s(struct proc *p, uint32_t *pc_arr)
+{
+ int8_t state = 0;
+ uint32_t num_revoked;
+ printk("[kernel] trying to transition _M -> _S (deprecated)!\n");
+ assert(p->state == PROC_RUNNING_M); // TODO: (ACR) async core req
+ /* save the context, to be restarted in _S mode */
+ disable_irqsave(&state); /* protect cur_tf */
+ assert(current_tf);
+ p->env_tf = *current_tf;
+ clear_owning_proc(core_id()); /* so we don't restart */
+ enable_irqsave(&state);
+ env_push_ancillary_state(p); // TODO: (HSS)
+ /* sending death, since it's not our job to save contexts or anything in
+ * this case. */
+ num_revoked = __proc_take_allcores(p, pc_arr, FALSE);
+ __proc_set_state(p, PROC_RUNNABLE_S);
+ return num_revoked;
}
/* Helper function. Is the given pcore a mapped vcore? No locking involved, be
return try_get_pcoreid(p, vcoreid);
}
-/* Helper function: yields / wraps up current_tf and schedules the _S */
-void __proc_yield_s(struct proc *p, struct trapframe *tf)
+/* Helper: saves the SCP's tf state and unmaps vcore 0. In the future, we'll
+ * probably use vc0's space for env_tf and the silly state. */
+void __proc_save_context_s(struct proc *p, struct trapframe *tf)
{
- assert(p->state == PROC_RUNNING_S);
p->env_tf= *tf;
env_push_ancillary_state(p); /* TODO: (HSS) */
- __unmap_vcore(p, 0); /* VC# keep in sync with proc_run _S */
- __proc_set_state(p, PROC_RUNNABLE_S);
- schedule_proc(p);
+ __unmap_vcore(p, 0); /* VC# keep in sync with proc_run_s */
}
/* Yields the calling core. Must be called locally (not async) for now.
- * - If RUNNING_S, you just give up your time slice and will eventually return.
+ * - If RUNNING_S, you just give up your time slice and will eventually return,
+ * possibly after WAITING on an event.
* - If RUNNING_M, you give up the current vcore (which never returns), and
* adjust the amount of cores wanted/granted.
- * - If you have only one vcore, you switch to RUNNABLE_M. When you run again,
- * you'll have one guaranteed core, starting from the entry point.
- *
- * - RES_CORES amt_wanted will be the amount running after taking away the
- * yielder, unless there are none left, in which case it will be 1.
+ * - If you have only one vcore, you switch to WAITING. There's no 'classic
+ * yield' for MCPs (at least not now). When you run again, you'll have one
+ * guaranteed core, starting from the entry point.
*
- * If the call is being nice, it means that it is in response to a preemption
- * (which needs to be checked). If there is no preemption pending, just return.
- * No matter what, don't adjust the number of cores wanted.
+ * If the call is being nice, it means different things for SCPs and MCPs. For
+ * MCPs, it means that it is in response to a preemption (which needs to be
+ * checked). If there is no preemption pending, just return. For SCPs, it
+ * means the proc wants to give up the core, but still has work to do. If not,
+ * the proc is trying to wait on an event. It's not being nice to others, it
+ * just has no work to do.
*
* This usually does not return (smp_idle()), so it will eat your reference.
* Also note that it needs a non-current/edible reference, since it will abandon
spin_lock(&p->proc_lock); /* horrible scalability. =( */
switch (p->state) {
case (PROC_RUNNING_S):
- __proc_yield_s(p, current_tf); /* current_tf 0'd in abandon core */
+ if (!being_nice) {
+ /* waiting for an event to unblock us */
+ vcpd = &p->procdata->vcore_preempt_data[0];
+ /* this check is an early optimization (check, signal, check
+ * again pattern). We could also lock before spamming the
+ * vcore in event.c */
+ if (vcpd->notif_pending) {
+ /* they can't handle events, just need to prevent a yield.
+ * (note the notif_pendings are collapsed). */
+ if (!scp_is_vcctx_ready(vcpd))
+ vcpd->notif_pending = FALSE;
+ goto out_failed;
+ }
+ /* syncing with event's SCP code. we set waiting, then check
+ * pending. they set pending, then check waiting. it's not
+ * possible for us to miss the notif *and* for them to miss
+ * WAITING. one (or both) of us will see and make sure the proc
+ * wakes up. */
+ __proc_set_state(p, PROC_WAITING);
+ wrmb(); /* don't let the state write pass the notif read */
+ if (vcpd->notif_pending) {
+ __proc_set_state(p, PROC_RUNNING_S);
+ if (!scp_is_vcctx_ready(vcpd))
+ vcpd->notif_pending = FALSE;
+ goto out_failed;
+ }
+ /* if we're here, we want to sleep. a concurrent event that
+ * hasn't already written notif_pending will have seen WAITING,
+ * and will be spinning while we do this. */
+ __proc_save_context_s(p, current_tf);
+ spin_unlock(&p->proc_lock); /* note irqs are not enabled yet */
+ } else {
+ /* yielding to allow other processes to run. we're briefly
+ * WAITING, til we are woken up */
+ __proc_set_state(p, PROC_WAITING);
+ __proc_save_context_s(p, current_tf);
+ spin_unlock(&p->proc_lock); /* note irqs are not enabled yet */
+ /* immediately wake up the proc (makes it runnable) */
+ proc_wakeup(p);
+ }
goto out_yield_core;
case (PROC_RUNNING_M):
break; /* will handle this stuff below */
/* At this point, AFAIK there should be no preempt/death messages on the
* way, and we're on the online list. So we'll go ahead and do the yielding
* business. */
- /* no need to preempt later, since we are yielding (nice or otherwise) */
- if (vc->preempt_pending)
+ /* If there's a preempt pending, we don't need to preempt later since we are
+ * yielding (nice or otherwise). If not, this is just a regular yield. */
+ if (vc->preempt_pending) {
vc->preempt_pending = 0;
+ } else {
+ /* Optional: on a normal yield, check to see if we are putting them
+		 * below amt_wanted (help with user races) and bail. */
+ if (p->procdata->res_req[RES_CORES].amt_wanted >=
+ p->procinfo->num_vcores)
+ goto out_failed;
+ }
/* Don't let them yield if they are missing a notification. Userspace must
* not leave vcore context without dealing with notif_pending. pop_ros_tf()
* handles leaving via uthread context. This handles leaving via a yield.
/* Next time the vcore starts, it starts fresh */
vcpd->notif_disabled = FALSE;
__unmap_vcore(p, vcoreid);
- /* Adjust implied resource desires */
- p->resources[RES_CORES].amt_granted = --(p->procinfo->num_vcores);
- if (!being_nice)
- p->resources[RES_CORES].amt_wanted = p->procinfo->num_vcores;
+ p->procinfo->num_vcores--;
+ p->procinfo->res_grant[RES_CORES] = p->procinfo->num_vcores;
__seq_end_write(&p->procinfo->coremap_seqctr);
- /* Hand the now-idle core to the ksched */
- put_idle_core(pcoreid);
- // last vcore? then we really want 1, and to yield the gang
+ /* No more vcores? Then we wait on an event */
if (p->procinfo->num_vcores == 0) {
- p->resources[RES_CORES].amt_wanted = 1;
- /* wait on an event (not supporting 'being nice' for now */
+ /* consider a ksched op to tell it about us WAITING */
__proc_set_state(p, PROC_WAITING);
}
+ spin_unlock(&p->proc_lock);
+ /* Hand the now-idle core to the ksched */
+ put_idle_core(p, pcoreid);
goto out_yield_core;
out_failed:
/* for some reason we just want to return, either to take a KMSG that cleans
spin_unlock(&p->proc_lock);
enable_irqsave(&state);
return;
-out_yield_core: /* successfully yielded the core */
- spin_unlock(&p->proc_lock);
+out_yield_core: /* successfully yielded the core */
proc_decref(p); /* need to eat the ref passed in */
/* Clean up the core and idle. Need to do this before enabling interrupts,
* since once we put_idle_core() and unlock, we could get a startcore. */
* kernel - check the documentation. Note that pending is more about messages.
* The process needs to be in vcore_context, and the reason is usually a
* message. We set pending here in case we were called to prod them into vcore
- * context (like via a sys_self_notify. */
+ * context (like via a sys_self_notify). Also note that this works for _S
+ * procs, if you send to vcore 0 (and the proc is running). */
void proc_notify(struct proc *p, uint32_t vcoreid)
{
struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
* and don't want the proc_lock to be an irqsave. Spurious
* __notify() kmsgs are okay (it checks to see if the right receiver
* is current). */
- if ((p->state & PROC_RUNNING_M) && // TODO: (VC#) (_S state)
- vcore_is_mapped(p, vcoreid)) {
+ if (vcore_is_mapped(p, vcoreid)) {
printd("[kernel] sending notif to vcore %d\n", vcoreid);
/* This use of try_get_pcoreid is racy, might be unmapped */
send_kernel_message(try_get_pcoreid(p, vcoreid), __notify, (long)p,
}
}
-/* Hold the lock before calling this. If the process is WAITING, it will wake
- * it up and schedule it. */
+/* Makes sure p is runnable. May be spammed, via the ksched. Called only by
+ * the ksched when it holds the ksched lock (or whatever). We need to lock both
+ * the ksched and the proc at some point, so we need to start this call in the
+ * ksched (lock ordering).
+ *
+ * Will call back to the ksched via one of the __sched_.cp_wakeup() calls. */
void __proc_wakeup(struct proc *p)
{
- if (p->state != PROC_WAITING)
- return;
- if (__proc_is_mcp(p))
+ spin_lock(&p->proc_lock);
+ if (__proc_is_mcp(p)) {
+ /* we only wake up WAITING mcps */
+ if (p->state != PROC_WAITING)
+ goto out_unlock;
+ if (!p->procdata->res_req[RES_CORES].amt_wanted)
+ p->procdata->res_req[RES_CORES].amt_wanted = 1;
__proc_set_state(p, PROC_RUNNABLE_M);
- else
- __proc_set_state(p, PROC_RUNNABLE_S);
- schedule_proc(p);
+ spin_unlock(&p->proc_lock);
+ __sched_mcp_wakeup(p);
+ goto out;
+ } else {
+ /* SCPs can wake up for a variety of reasons. the only times we need
+ * to do something is if it was waiting or just created. other cases
+ * are either benign (just go out), or potential bugs (_Ms) */
+ switch (p->state) {
+ case (PROC_CREATED):
+ case (PROC_WAITING):
+ __proc_set_state(p, PROC_RUNNABLE_S);
+ break;
+ case (PROC_RUNNABLE_S):
+ case (PROC_RUNNING_S):
+ case (PROC_DYING):
+ goto out_unlock;
+ case (PROC_RUNNABLE_M):
+ case (PROC_RUNNING_M):
+ warn("Weird state(%s) in %s()", procstate2str(p->state),
+ __FUNCTION__);
+ goto out_unlock;
+ }
+ printd("[kernel] FYI, waking up an _S proc\n"); /* thanks, past brho! */
+ spin_unlock(&p->proc_lock);
+ __sched_scp_wakeup(p);
+ goto out;
+ }
+out_unlock:
+ spin_unlock(&p->proc_lock);
+out:
+ return;
}
/* Is the process in multi_mode / is an MCP or not? */
/* Raw function to preempt every vcore. If you care about locking, do it before
* calling. */
-void __proc_preempt_all(struct proc *p)
+uint32_t __proc_preempt_all(struct proc *p, uint32_t *pc_arr)
{
/* instead of doing this, we could just preempt_served all possible vcores,
* and not just the active ones. We would need to sort out a way to deal
* just make us RUNNABLE_M. */
TAILQ_FOREACH(vc_i, &p->online_vcs, list)
vc_i->preempt_served = TRUE;
- __proc_take_allcores_dumb(p, TRUE);
+ return __proc_take_allcores(p, pc_arr, TRUE);
}
/* Warns and preempts a vcore from p. No delaying / alarming, or anything. The
void proc_preempt_core(struct proc *p, uint32_t pcoreid, uint64_t usec)
{
uint64_t warn_time = read_tsc() + usec2tsc(usec);
-
+ bool preempted = FALSE;
/* DYING could be okay */
if (p->state != PROC_RUNNING_M) {
warn("Tried to preempt from a non RUNNING_M proc!");
return;
}
spin_lock(&p->proc_lock);
+ /* TODO: this is racy, could be messages in flight that haven't unmapped
+ * yet, so we need to do something more complicated */
if (is_mapped_vcore(p, pcoreid)) {
__proc_preempt_warn(p, get_vcoreid(p, pcoreid), warn_time);
__proc_preempt_core(p, pcoreid);
- put_idle_core(pcoreid);
+ preempted = TRUE;
} else {
warn("Pcore doesn't belong to the process!!");
}
if (!p->procinfo->num_vcores) {
__proc_set_state(p, PROC_RUNNABLE_M);
- schedule_proc(p);
}
spin_unlock(&p->proc_lock);
+ if (preempted)
+ put_idle_core(p, pcoreid);
}
/* Warns and preempts all from p. No delaying / alarming, or anything. The
void proc_preempt_all(struct proc *p, uint64_t usec)
{
uint64_t warn_time = read_tsc() + usec2tsc(usec);
-
+ uint32_t num_revoked = 0;
spin_lock(&p->proc_lock);
+	/* storage for pc_arr is alloced at decl, which is after grabbing the lock */
+ uint32_t pc_arr[p->procinfo->num_vcores];
/* DYING could be okay */
if (p->state != PROC_RUNNING_M) {
warn("Tried to preempt from a non RUNNING_M proc!");
return;
}
__proc_preempt_warnall(p, warn_time);
- __proc_preempt_all(p);
+ num_revoked = __proc_preempt_all(p, pc_arr);
assert(!p->procinfo->num_vcores);
__proc_set_state(p, PROC_RUNNABLE_M);
- schedule_proc(p);
spin_unlock(&p->proc_lock);
+ /* Return the cores to the ksched */
+ if (num_revoked)
+ put_idle_cores(p, pc_arr, num_revoked);
}
/* Give the specific pcore to proc p. Lots of assumptions, so don't really use
* out). */
uint32_t proc_get_vcoreid(struct proc *SAFE p, uint32_t pcoreid)
{
- uint32_t vcoreid;
- // TODO: the code currently doesn't track the vcoreid properly for _S (VC#)
- spin_lock(&p->proc_lock);
- switch (p->state) {
- case PROC_RUNNING_S:
- spin_unlock(&p->proc_lock);
- return 0; // TODO: here's the ugly part
- case PROC_RUNNING_M:
- vcoreid = get_vcoreid(p, pcoreid);
- spin_unlock(&p->proc_lock);
- return vcoreid;
- case PROC_DYING: // death message is on the way
- spin_unlock(&p->proc_lock);
- return 0;
- default:
- spin_unlock(&p->proc_lock);
- panic("Weird state(%s) in %s()", procstate2str(p->state),
- __FUNCTION__);
- }
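+	/* just report the pcore's owning_vcoreid; only meaningful while that pcore
+	 * is actually running p */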
+ struct per_cpu_info *pcpui = &per_cpu_info[pcoreid];
+ return pcpui->owning_vcoreid;
}
/* TODO: make all of these static inlines when we gut the env crap */
/********** Core granting (bulk and single) ***********/
/* Helper: gives pcore to the process, mapping it to the next available vcore
- * from list vc_list. Returns TRUE if we succeeded (non-empty). */
+ * from list vc_list. Returns TRUE if we succeeded (non-empty). If you pass in
+ * **vc, we'll tell you which vcore it was. */
static bool __proc_give_a_pcore(struct proc *p, uint32_t pcore,
- struct vcore_tailq *vc_list)
+ struct vcore_tailq *vc_list, struct vcore **vc)
{
struct vcore *new_vc;
new_vc = TAILQ_FIRST(vc_list);
TAILQ_REMOVE(vc_list, new_vc, list);
TAILQ_INSERT_TAIL(&p->online_vcs, new_vc, list);
__map_vcore(p, vcore2vcoreid(p, new_vc), pcore);
+ if (vc)
+ *vc = new_vc;
return TRUE;
}
static void __proc_give_cores_runnable(struct proc *p, uint32_t *pc_arr,
uint32_t num)
{
- struct vcore *vc_i, *vc_temp;
- struct event_msg preempt_msg = {0};
- /* They shouldn't have any vcores yet. One issue with allowing multiple
- * calls to _give_cores_ is that the bulk preempt list needs to be handled
- * in one shot. */
- assert(!p->procinfo->num_vcores);
+ assert(p->state == PROC_RUNNABLE_M);
assert(num); /* catch bugs */
/* add new items to the vcoremap */
- __seq_start_write(&p->procinfo->coremap_seqctr);
+	__seq_start_write(&p->procinfo->coremap_seqctr); /* unnecessary if offline */
p->procinfo->num_vcores += num;
for (int i = 0; i < num; i++) {
/* Try from the bulk list first */
- if (__proc_give_a_pcore(p, pc_arr[i], &p->bulk_preempted_vcs))
+ if (__proc_give_a_pcore(p, pc_arr[i], &p->bulk_preempted_vcs, 0))
continue;
/* o/w, try from the inactive list. at one point, i thought there might
* be a legit way in which the inactive list could be empty, but that i
* wanted to catch it via an assert. */
- assert(__proc_give_a_pcore(p, pc_arr[i], &p->inactive_vcs));
+ assert(__proc_give_a_pcore(p, pc_arr[i], &p->inactive_vcs, 0));
}
__seq_end_write(&p->procinfo->coremap_seqctr);
- /* Send preempt messages for any left on the BP list. No need to set any
- * flags, it all was done on the real preempt. Now we're just telling the
- * process about any that didn't get restarted and are still preempted. */
- TAILQ_FOREACH_SAFE(vc_i, &p->bulk_preempted_vcs, list, vc_temp) {
- /* Note that if there are no active vcores, send_k_e will post to our
- * own vcore, the last of which will be put on the inactive list and be
- * the first to be started. We don't have to worry too much, since
- * we're holding the proc lock */
- preempt_msg.ev_type = EV_VCORE_PREEMPT;
- preempt_msg.ev_arg2 = vcore2vcoreid(p, vc_i); /* arg2 is 32 bits */
- send_kernel_event(p, &preempt_msg, 0);
- /* TODO: we may want a TAILQ_CONCAT_HEAD, or something that does that.
- * We need a loop for the messages, but not necessarily for the list
- * changes. */
- TAILQ_REMOVE(&p->bulk_preempted_vcs, vc_i, list);
- /* TODO: put on the bulk preempt list, if applicable */
- TAILQ_INSERT_HEAD(&p->inactive_vcs, vc_i, list);
- }
}
static void __proc_give_cores_running(struct proc *p, uint32_t *pc_arr,
uint32_t num)
{
+ struct vcore *vc_i;
/* Up the refcnt, since num cores are going to start using this
* process and have it loaded in their owning_proc and 'current'. */
proc_incref(p, num * 2); /* keep in sync with __startcore */
p->procinfo->num_vcores += num;
assert(TAILQ_EMPTY(&p->bulk_preempted_vcs));
for (int i = 0; i < num; i++) {
- assert(__proc_give_a_pcore(p, pc_arr[i], &p->inactive_vcs));
- send_kernel_message(pc_arr[i], __startcore, (long)p, 0, 0,
- KMSG_IMMEDIATE);
+ assert(__proc_give_a_pcore(p, pc_arr[i], &p->inactive_vcs, &vc_i));
+ send_kernel_message(pc_arr[i], __startcore, (long)p,
+ (long)vcore2vcoreid(p, vc_i), 0, KMSG_IMMEDIATE);
}
__seq_end_write(&p->procinfo->coremap_seqctr);
}
-/* Gives process p the additional num cores listed in pcorelist. You must be
- * RUNNABLE_M or RUNNING_M before calling this. If you're RUNNING_M, this will
- * startup your new cores at the entry point with their virtual IDs (or restore
- * a preemption). If you're RUNNABLE_M, you should call proc_run after this so
- * that the process can start to use its cores.
+/* Gives process p the additional num cores listed in pcorelist. If the proc is
+ * not RUNNABLE_M or RUNNING_M, this will fail and allocate none of the cores
+ * (and return -1). If you're RUNNING_M, this will startup your new cores at
+ * the entry point with their virtual IDs (or restore a preemption). If you're
+ * RUNNABLE_M, you should call __proc_run_m after this so that the process can
+ * start to use its cores. In either case, this returns 0.
*
* If you're *_S, make sure your core0's TF is set (which is done when coming in
* via arch/trap.c and we are RUNNING_S), change your state, then call this.
- * Then call proc_run().
+ * Then call __proc_run_m().
*
* The reason I didn't bring the _S cases from core_request over here is so we
* can keep this family of calls dealing with only *_Ms, to avoiding caring if
- * this is called from another core, and to avoid the need_to_idle business.
- * The other way would be to have this function have the side effect of changing
- * state, and finding another way to do the need_to_idle.
+ * this is called from another core, and to avoid the _S -> _M transition.
*
* WARNING: You must hold the proc_lock before calling this! */
-void __proc_give_cores(struct proc *p, uint32_t *pc_arr, uint32_t num)
+int __proc_give_cores(struct proc *p, uint32_t *pc_arr, uint32_t num)
{
/* should never happen: */
assert(num + p->procinfo->num_vcores <= MAX_NUM_CPUS);
switch (p->state) {
case (PROC_RUNNABLE_S):
case (PROC_RUNNING_S):
- panic("Don't give cores to a process in a *_S state!\n");
- break;
+ warn("Don't give cores to a process in a *_S state!\n");
+ return -1;
case (PROC_DYING):
- panic("Attempted to give cores to a DYING process.\n");
- break;
+ case (PROC_WAITING):
+ /* can't accept, just fail */
+ return -1;
case (PROC_RUNNABLE_M):
__proc_give_cores_runnable(p, pc_arr, num);
break;
panic("Weird state(%s) in %s()", procstate2str(p->state),
__FUNCTION__);
}
- p->resources[RES_CORES].amt_granted += num;
+ /* TODO: considering moving to the ksched (hard, due to yield) */
+ p->procinfo->res_grant[RES_CORES] += num;
+ return 0;
}
/********** Core revocation (bulk and single) ***********/
* pc_arr. Will preempt if 'preempt' is set. o/w, no state will be saved, etc.
* Don't use this for taking all of a process's cores.
*
- * Make sure you hold the lock when you call this. */
-void __proc_take_corelist(struct proc *p, uint32_t *pc_arr, size_t num,
+ * Make sure you hold the lock when you call this, and make sure that the pcore
+ * actually belongs to the proc (non-trivial due to other __preempt messages). */
+void __proc_take_corelist(struct proc *p, uint32_t *pc_arr, uint32_t num,
bool preempt)
{
struct vcore *vc;
}
p->procinfo->num_vcores -= num;
__seq_end_write(&p->procinfo->coremap_seqctr);
- p->resources[RES_CORES].amt_granted -= num;
+ p->procinfo->res_grant[RES_CORES] -= num;
}
/* Takes all cores from a process (revoke via kmsg or unmap), putting them on
assert(num == p->procinfo->num_vcores);
p->procinfo->num_vcores = 0;
__seq_end_write(&p->procinfo->coremap_seqctr);
- p->resources[RES_CORES].amt_granted = 0;
+ p->procinfo->res_grant[RES_CORES] = 0;
return num;
}
-/* Dumb legacy helper, simply takes all cores and just puts them on the idle
- * core map (which belongs in the scheduler.
- *
- * TODO: no one should call this; the ksched should handle this internally */
-void __proc_take_allcores_dumb(struct proc *p, bool preempt)
-{
- uint32_t num_revoked;
- uint32_t pc_arr[p->procinfo->num_vcores];
- num_revoked = __proc_take_allcores(p, pc_arr, preempt);
- for (int i = 0; i < num_revoked; i++)
- put_idle_core(pc_arr[i]);
-}
-
/* Helper to do the vcore->pcore and inverse mapping. Hold the lock when
* calling. */
void __map_vcore(struct proc *p, uint32_t vcoreid, uint32_t pcoreid)
struct proc *p = pcpui->owning_proc;
assert(!irq_is_enabled());
pcpui->owning_proc = 0;
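+	/* poison the vcoreid so any stale use after clearing stands out */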
+ pcpui->owning_vcoreid = 0xdeadbeef;
pcpui->cur_tf = 0; /* catch bugs for now (will go away soon) */
if (p)
proc_decref(p);
bool enable_my_notif)
{
uint32_t caller_vcoreid, pcoreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[pcoreid];
struct preempt_data *caller_vcpd;
struct vcore *caller_vc, *new_vc;
struct event_msg preempt_msg = {0};
if (!is_mapped_vcore(p, pcoreid))
goto out_failed;
/* Get all our info */
- caller_vcoreid = get_vcoreid(p, pcoreid);
+ caller_vcoreid = get_vcoreid(p, pcoreid); /* holding lock, we can check */
+ assert(caller_vcoreid == pcpui->owning_vcoreid);
caller_vcpd = &p->procdata->vcore_preempt_data[caller_vcoreid];
caller_vc = vcoreid2vcore(p, caller_vcoreid);
/* Should only call from vcore context */
__unmap_vcore(p, caller_vcoreid);
__map_vcore(p, new_vcoreid, pcoreid);
__seq_end_write(&p->procinfo->coremap_seqctr);
+ /* So this core knows which vcore is here: */
+ pcpui->owning_vcoreid = new_vcoreid;
/* Send either a PREEMPT msg or a CHECK_MSGS msg. If they said to
* enable_my_notif, then all userspace needs is to check messages, not a
* full preemption recovery. */
}
/* Kernel message handler to start a process's context on this core, when the
- * core next considers running a process. Tightly coupled with proc_run().
+ * core next considers running a process. Tightly coupled with __proc_run_m().
* Interrupts are disabled. */
void __startcore(struct trapframe *tf, uint32_t srcid, long a0, long a1, long a2)
{
- uint32_t vcoreid, coreid = core_id();
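+	/* the vcoreid now rides in a1, set by the __startcore senders (__proc_run_m
+	 * and __proc_give_cores_running), instead of a get_vcoreid() lookup */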
+ uint32_t vcoreid = (uint32_t)a1;
+ uint32_t coreid = core_id();
struct per_cpu_info *pcpui = &per_cpu_info[coreid];
struct proc *p_to_run = (struct proc *CT(1))a0;
assert(!pcpui->owning_proc);
/* the sender of the amsg increfed already for this saved ref to p_to_run */
pcpui->owning_proc = p_to_run;
+ pcpui->owning_vcoreid = vcoreid;
/* sender increfed again, assuming we'd install to cur_proc. only do this
* if no one else is there. this is an optimization, since we expect to
* send these __startcores to idles cores, and this saves a scramble to
* incref when all of the cores restartcore/startcore later. Keep in sync
- * with __proc_give_cores() and proc_run(). */
+ * with __proc_give_cores() and __proc_run_m(). */
if (!pcpui->cur_proc) {
pcpui->cur_proc = p_to_run; /* install the ref to cur_proc */
lcr3(p_to_run->env_cr3); /* load the page tables to match cur_proc */
proc_decref(p_to_run); /* can't install, decref the extra one */
}
/* Note we are not necessarily in the cr3 of p_to_run */
- vcoreid = get_vcoreid(p_to_run, coreid);
/* Now that we sorted refcnts and know p / which vcore it should be, set up
* pcpui->cur_tf so that it will run that particular vcore */
__set_curtf_to_vcoreid(p_to_run, vcoreid);
/* Not the right proc */
if (p != pcpui->owning_proc)
return;
- /* Common cur_tf sanity checks */
+ /* Common cur_tf sanity checks. Note cur_tf could be an _S's env_tf */
assert(pcpui->cur_tf);
- assert(pcpui->cur_tf == &pcpui->actual_tf);
assert(!in_kernel(pcpui->cur_tf));
- /* We shouldn't need to lock here, since unmapping happens on the pcore and
- * mapping would only happen if the vcore was free, which it isn't until
- * after we unmap. */
- vcoreid = get_vcoreid(p, coreid);
+ vcoreid = pcpui->owning_vcoreid;
+ assert(vcoreid == get_vcoreid(p, coreid));
vcpd = &p->procdata->vcore_preempt_data[vcoreid];
+ /* for SCPs that haven't (and might never) call vc_event_init, like rtld.
+	 * it's harmless for MCPs to do this check. */
+ if (!scp_is_vcctx_ready(vcpd))
+ return;
printd("received active notification for proc %d's vcore %d on pcore %d\n",
p->procinfo->pid, vcoreid, coreid);
/* sort signals. notifs are now masked, like an interrupt gate */
if (vcpd->notif_disabled)
return;
vcpd->notif_disabled = TRUE;
- /* This bit shouldn't be important anymore */
- vcpd->notif_pending = FALSE; // no longer pending - it made it here
/* save the old tf in the notify slot, build and pop a new one. Note that
* silly state isn't our business for a notification. */
vcpd->notif_tf = *pcpui->cur_tf;
assert(pcpui->cur_tf);
assert(pcpui->cur_tf == &pcpui->actual_tf);
assert(!in_kernel(pcpui->cur_tf));
- /* We shouldn't need to lock here, since unmapping happens on the pcore and
- * mapping would only happen if the vcore was free, which it isn't until
- * after we unmap. */
- vcoreid = get_vcoreid(p, coreid);
+ vcoreid = pcpui->owning_vcoreid;
+ assert(vcoreid == get_vcoreid(p, coreid));
p->procinfo->vcoremap[vcoreid].preempt_served = FALSE;
/* either __preempt or proc_yield() ends the preempt phase. */
p->procinfo->vcoremap[vcoreid].preempt_pending = 0;
struct per_cpu_info *pcpui = &per_cpu_info[coreid];
struct proc *p = pcpui->owning_proc;
if (p) {
- vcoreid = get_vcoreid(p, coreid);
+ vcoreid = pcpui->owning_vcoreid;
+ assert(vcoreid == get_vcoreid(p, coreid));
printd("[kernel] death on physical core %d for process %d's vcore %d\n",
coreid, p->pid, vcoreid);
__unmap_vcore(p, vcoreid);
printk("Resources:\n------------------------\n");
for (int i = 0; i < MAX_NUM_RESOURCES; i++)
printk("\tRes type: %02d, amt wanted: %08d, amt granted: %08d\n", i,
- p->resources[i].amt_wanted, p->resources[i].amt_granted);
+ p->procdata->res_req[i].amt_wanted, p->procinfo->res_grant[i]);
printk("Open Files:\n");
struct files_struct *files = &p->open_files;
spin_lock(&files->lock);