Rename transition_stack -> vcore_stack (XCC)
author Barret Rhoden <brho@cs.berkeley.edu>
Wed, 22 Jul 2015 19:30:28 +0000 (15:30 -0400)
committer Barret Rhoden <brho@cs.berkeley.edu>
Wed, 22 Jul 2015 19:33:43 +0000 (15:33 -0400)
The name 'transition' predates vcores.  Might as well keep the
vcore_stack in line with vcore_entry and vcore_tls_desc.

Reinstall your kernel headers.

kern/include/ros/event.h
kern/src/process.c
kern/src/trap.c
user/parlib/event.c
user/parlib/uthread.c
user/parlib/vcore.c

index 7336350..9b167ba 100644 (file)
@@ -114,7 +114,7 @@ struct preempt_data {
        struct ancillary_state          preempt_anc;
        struct user_context                     uthread_ctx;            /* for preempts or notifs */
        uintptr_t                                       vcore_entry;            /* advertised by the user */
-       uintptr_t                                       transition_stack;       /* advertised by the user */
+       uintptr_t                                       vcore_stack;            /* advertised by the user */
        uintptr_t                                       vcore_tls_desc;         /* advertised by the user */
        atomic_t                                        flags;
        int                                                     rflags;                         /* racy flags */
index 96e47b0..2b6e882 100644 (file)
@@ -619,7 +619,7 @@ void proc_run_s(struct proc *p)
                                pcpui->cur_ctx = &pcpui->actual_ctx;
                                memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
                                proc_init_ctx(pcpui->cur_ctx, 0, vcpd->vcore_entry,
-                                             vcpd->transition_stack, vcpd->vcore_tls_desc);
+                                             vcpd->vcore_stack, vcpd->vcore_tls_desc);
                        } else {
                                /* If they have no transition stack, then they can't receive
                                 * events.  The most they are getting is a wakeup from the
@@ -1898,9 +1898,9 @@ static void __set_curctx_to_vcoreid(struct proc *p, uint32_t vcoreid,
                pcpui->actual_ctx = vcpd->vcore_ctx;
                proc_secure_ctx(&pcpui->actual_ctx);
        } else { /* not restarting from a preemption, use a fresh vcore */
-               assert(vcpd->transition_stack);
+               assert(vcpd->vcore_stack);
                proc_init_ctx(&pcpui->actual_ctx, vcoreid, vcpd->vcore_entry,
-                             vcpd->transition_stack, vcpd->vcore_tls_desc);
+                             vcpd->vcore_stack, vcpd->vcore_tls_desc);
                /* Disable/mask active notifications for fresh vcores */
                vcpd->notif_disabled = TRUE;
        }
@@ -2136,7 +2136,7 @@ void __notify(uint32_t srcid, long a0, long a1, long a2)
        vcpd->uthread_ctx = *pcpui->cur_ctx;
        memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
        proc_init_ctx(pcpui->cur_ctx, vcoreid, vcpd->vcore_entry,
-                     vcpd->transition_stack, vcpd->vcore_tls_desc);
+                     vcpd->vcore_stack, vcpd->vcore_tls_desc);
        /* this cur_ctx will get run when the kernel returns / idles */
 }
 
index dd2c79e..35c13cf 100644 (file)
@@ -69,7 +69,7 @@ void reflect_unhandled_trap(unsigned int trap_nr, unsigned int err,
        vcpd->uthread_ctx = *pcpui->cur_ctx;
        memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
        proc_init_ctx(pcpui->cur_ctx, vcoreid, vcpd->vcore_entry,
-                     vcpd->transition_stack, vcpd->vcore_tls_desc);
+                     vcpd->vcore_stack, vcpd->vcore_tls_desc);
        return;
 error_out:
        print_unhandled_trap(p, pcpui->cur_ctx, trap_nr, err, aux);
index bb21a65..ef61a4a 100644 (file)
@@ -386,7 +386,7 @@ void handle_vcpd_mbox(uint32_t rem_vcoreid)
        __vc_handle_an_mbox = TRUE;
        __vc_rem_vcoreid = rem_vcoreid;
        /* Reset the stack and start over in vcore context */
-       set_stack_pointer((void*)vcpd->transition_stack);
+       set_stack_pointer((void*)vcpd->vcore_stack);
        vcore_entry();
        assert(0);
 }
index 7e11bda..89c9c54 100644 (file)
@@ -412,7 +412,7 @@ void uthread_yield(bool save_state, void (*yield_func)(struct uthread*, void*),
         * as you need to any local vars that might be pushed before calling the
         * next function, or for whatever other reason the compiler/hardware might
         * walk up the stack a bit when calling a noreturn function. */
-       set_stack_pointer((void*)vcpd->transition_stack);
+       set_stack_pointer((void*)vcpd->vcore_stack);
        /* Finish exiting in another function. */
        __uthread_yield();
        /* Should never get here */
@@ -572,7 +572,7 @@ void run_current_uthread(void)
                uth->flags |= UTHREAD_SAVED | UTHREAD_FPSAVED;
                handle_refl_fault(uth, &vcpd->uthread_ctx);
                /* we abort no matter what.  up to the 2LS to reschedule the thread */
-               set_stack_pointer((void*)vcpd->transition_stack);
+               set_stack_pointer((void*)vcpd->vcore_stack);
                vcore_entry();
        }
        /* Go ahead and start the uthread */
@@ -613,7 +613,7 @@ void run_uthread(struct uthread *uthread)
                clear_refl_fault(&uthread->u_ctx);
                handle_refl_fault(uthread, &uthread->u_ctx);
                /* we abort no matter what.  up to the 2LS to reschedule the thread */
-               set_stack_pointer((void*)vcpd->transition_stack);
+               set_stack_pointer((void*)vcpd->vcore_stack);
                vcore_entry();
        }
        uthread->state = UT_RUNNING;
index 862c490..18d5655 100644 (file)
@@ -114,15 +114,15 @@ static int allocate_transition_tls(int id)
        return 0;
 }
 
-static void free_transition_stack(int id)
+static void free_vcore_stack(int id)
 {
        // don't actually free stacks
 }
 
-static int allocate_transition_stack(int id)
+static int allocate_vcore_stack(int id)
 {
        struct preempt_data *vcpd = vcpd_of(id);
-       if (vcpd->transition_stack)
+       if (vcpd->vcore_stack)
                return 0; // reuse old stack
 
        void* stackbot = mmap(0, TRANSITION_STACK_SIZE,
@@ -132,7 +132,7 @@ static int allocate_transition_stack(int id)
        if(stackbot == MAP_FAILED)
                return -1; // errno set by mmap
 
-       vcpd->transition_stack = (uintptr_t)stackbot + TRANSITION_STACK_SIZE;
+       vcpd->vcore_stack = (uintptr_t)stackbot + TRANSITION_STACK_SIZE;
 
        return 0;
 }
@@ -159,7 +159,7 @@ void __attribute__((constructor)) vcore_lib_init(void)
        /* Need to alloc vcore0's transition stuff here (technically, just the TLS)
         * so that schedulers can use vcore0's transition TLS before it comes up in
         * vcore_entry() */
-       if(allocate_transition_stack(0) || allocate_transition_tls(0))
+       if (allocate_vcore_stack(0) || allocate_transition_tls(0))
                goto vcore_lib_init_fail;
 
        /* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
@@ -206,7 +206,7 @@ void vcore_reenter(void (*entry_func)(void))
   struct preempt_data *vcpd = vcpd_of(vcore_id());
 
   __vcore_reentry_func = entry_func;
-  set_stack_pointer((void*)vcpd->transition_stack);
+  set_stack_pointer((void*)vcpd->vcore_stack);
   cmb();
   __vcore_reenter();
 }
@@ -302,7 +302,7 @@ try_handle_it:
                nr_vcores_wanted = MIN(nr_vcores_wanted, max_vcores());
                /* Make sure all we might ask for are prepped */
                for (long i = _max_vcores_ever_wanted; i < nr_vcores_wanted; i++) {
-                       if (allocate_transition_stack(i) || allocate_transition_tls(i)) {
+                       if (allocate_vcore_stack(i) || allocate_transition_tls(i)) {
                                atomic_set(&vc_req_being_handled, 0);   /* unlock and bail out*/
                                return -1;
                        }