Make __proc_save_context_s assume the current_ctx
author: Barret Rhoden <brho@cs.berkeley.edu>
Thu, 19 Nov 2015 22:13:08 +0000 (14:13 -0800)
committer: Barret Rhoden <brho@cs.berkeley.edu>
Tue, 8 Dec 2015 20:59:08 +0000 (15:59 -0500)
All callers of this function pass in the current_ctx.  There's no
reasonable use case where we're trying to save any other context for an
SCP.  If there is a need for that in the future, the function
implementing that will need to deal with finalizing partial user
contexts.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/include/process.h
kern/src/mm.c
kern/src/process.c
kern/src/schedule.c

index 9ef1c5c..141225f 100644 (file)
@@ -89,7 +89,7 @@ void proc_signal_parent(struct proc *child);
 int __proc_disown_child(struct proc *parent, struct proc *child);
 int proc_change_to_m(struct proc *p);
 void __proc_save_fpu_s(struct proc *p);
-void __proc_save_context_s(struct proc *p, struct user_context *ctx);
+void __proc_save_context_s(struct proc *p);
 void proc_yield(struct proc *p, bool being_nice);
 void proc_notify(struct proc *p, uint32_t vcoreid);
 void proc_wakeup(struct proc *p);
index c1ff7c0..643e9f1 100644 (file)
@@ -888,7 +888,7 @@ static int __hpf_load_page(struct proc *p, struct page_map *pm,
                        /* it's possible for HPF to loop a few times; we can only save the
                         * first time, o/w we could clobber. */
                        if (first) {
-                               __proc_save_context_s(p, pcpui->cur_ctx);
+                               __proc_save_context_s(p);
                                __proc_save_fpu_s(p);
                                /* We clear the owner, since userspace doesn't run here
                                 * anymore, but we won't abandon since the fault handler
index 948b8d6..c9d63f7 100644 (file)
@@ -1089,9 +1089,9 @@ void __proc_save_fpu_s(struct proc *p)
  * In the future, we'll probably use vc0's space for scp_ctx and the silly
  * state.  If we ever do that, we'll need to stop using scp_ctx (soon to be in
  * VCPD) as a location for pcpui->cur_ctx to point (dangerous) */
-void __proc_save_context_s(struct proc *p, struct user_context *ctx)
+void __proc_save_context_s(struct proc *p)
 {
-       p->scp_ctx = *ctx;
+       p->scp_ctx = *current_ctx;
        __seq_start_write(&p->procinfo->coremap_seqctr);
        __unmap_vcore(p, 0);
        __seq_end_write(&p->procinfo->coremap_seqctr);
@@ -1154,13 +1154,13 @@ void proc_yield(struct proc *p, bool being_nice)
                                /* if we're here, we want to sleep.  a concurrent event that
                                 * hasn't already written notif_pending will have seen WAITING,
                                 * and will be spinning while we do this. */
-                               __proc_save_context_s(p, current_ctx);
+                               __proc_save_context_s(p);
                                spin_unlock(&p->proc_lock);
                        } else {
                                /* yielding to allow other processes to run.  we're briefly
                                 * WAITING, til we are woken up */
                                __proc_set_state(p, PROC_WAITING);
-                               __proc_save_context_s(p, current_ctx);
+                               __proc_save_context_s(p);
                                spin_unlock(&p->proc_lock);
                                /* immediately wake up the proc (makes it runnable) */
                                proc_wakeup(p);
index 6f2b6ab..83fc2bc 100644 (file)
@@ -315,7 +315,7 @@ static bool __schedule_scp(void)
                        /* Saving FP state aggressively.  Odds are, the SCP was hit by an
                         * IRQ and has a HW ctx, in which case we must save. */
                        __proc_save_fpu_s(pcpui->owning_proc);
-                       __proc_save_context_s(pcpui->owning_proc, pcpui->cur_ctx);
+                       __proc_save_context_s(pcpui->owning_proc);
                        vcore_account_offline(pcpui->owning_proc, 0);
                        __seq_start_write(&p->procinfo->coremap_seqctr);
                        __unmap_vcore(p, 0);