New vcoremap in procinfo
authorBarret Rhoden <brho@cs.berkeley.edu>
Sat, 13 Mar 2010 02:23:01 +0000 (18:23 -0800)
committerKevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:35:39 +0000 (17:35 -0700)
The vcoremap contains new items (not just pcoreids), and it is exposed
via procinfo (along with num_vcores).  A pcoreid of -1 no longer means
invalid; a separate bool flag is clearer for that.  Eventually we'll
signal userspace that the vcoremap (or a pcoremap) is being changed with
the seq_ctr.

If it turns out we are exposing too much info, we can move the vcoremap
back and push updates, but I'd rather not do that.

12 files changed:
kern/arch/i686/env.c
kern/include/env.h
kern/include/process.h
kern/include/ros/notification.h
kern/include/ros/procdata.h
kern/include/ros/procinfo.h
kern/src/elf.c
kern/src/env.c
kern/src/manager.c
kern/src/process.c
kern/src/resource.c
kern/src/syscall.c

index 4a858a2..15f8554 100644 (file)
@@ -19,7 +19,7 @@ void env_pop_tf(trapframe_t *tf)
        segdesc_t *my_gdt = per_cpu_info[core_id()].gdt;
        /* copy-in and check the LDT location.  the segmentation hardware write the
         * accessed bit, so we want the memory to be in the user-writeable area. */
-       segdesc_t *ldt = current->env_procdata->ldt;
+       segdesc_t *ldt = current->procdata->ldt;
        ldt = (segdesc_t*)MIN((uintptr_t)ldt, UTOP - LDT_SIZE);
        segdesc_t ldt_temp = SEG_SYS(STS_LDT, (uint32_t)ldt, LDT_SIZE, 3);
        my_gdt[GD_LDT >> 3] = ldt_temp;
index 4099a4b..a7eb636 100644 (file)
@@ -7,6 +7,7 @@
 #include <ros/memlayout.h>
 #include <ros/syscall.h>
 #include <ros/sysevent.h>
+#include <ros/procinfo.h>
 #include <error.h>
 #include <ros/procdata.h>
 #include <ros/procinfo.h>
@@ -33,11 +34,6 @@ struct Env {
        uint32_t env_refcnt;            // Reference count of kernel contexts using this
        uint32_t env_flags;
        uint32_t env_entry;
-       /* Virtual coremap: each index is the virtual core id, the contents at that
-        * index is the physical core_id() corresponding to the vcore.  -1 means it
-        * is unused */
-       int32_t vcoremap[MAX_NUM_CPUS];
-       uint32_t num_vcores;
 
        /* Cache color map: bitmap of the cache colors currently allocated to this
         * process */
@@ -57,8 +53,8 @@ struct Env {
 //     struct memregion_list memregions;
 
        // Per process info and data pages
-       procinfo_t *SAFE env_procinfo;       // KVA of per-process shared info table (RO)
-       procdata_t *SAFE env_procdata;       // KVA of per-process shared data table (RW)
+       procinfo_t *SAFE procinfo;       // KVA of per-process shared info table (RO)
+       procdata_t *SAFE procdata;       // KVA of per-process shared data table (RW)
        
        // The backring pointers for processing asynchronous system calls from the user
        // Note this is the actual backring, not a pointer to it somewhere else
index 349685b..cf87bc3 100644 (file)
@@ -99,13 +99,13 @@ void proc_yield(struct proc *SAFE p);
  *
  * WARNING: YOU MUST HOLD THE PROC_LOCK BEFORE CALLING THESE! */
 /* Gives process p the additional num cores listed in corelist */
-bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num);
+bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num);
 /* Makes process p's coremap look like corelist (add, remove, etc). Not used */
-bool __proc_set_allcores(struct proc *SAFE p, int32_t *corelist,
+bool __proc_set_allcores(struct proc *SAFE p, uint32_t *pcorelist,
                          size_t *num, amr_t message, TV(a0t) arg0,
                          TV(a1t) arg1, TV(a2t) arg2);
 /* Takes from process p the num cores listed in corelist */
-bool __proc_take_cores(struct proc *SAFE p, int32_t *corelist,
+bool __proc_take_cores(struct proc *SAFE p, uint32_t *pcorelist,
                        size_t num, amr_t message, TV(a0t) arg0,
                        TV(a1t) arg1, TV(a2t) arg2);
 bool __proc_take_allcores(struct proc *SAFE p, amr_t message, TV(a0t) arg0,
index cf000bc..82bd3da 100644 (file)
 #include <ros/arch/trapframe.h>
 // TODO: #include some one-way queue macros for the notif_event queue
 // TODO: move me to an atomic header, and give me some support functions.
+#ifndef __TMP_SEQ_CTR
+#define __TMP_SEQ_CTR
 typedef uint8_t seq_ctr_t;
+#endif
 
 /* How/If a process wants to be notified about an event */
 struct notif_method {
index bbea850..aa2697b 100644 (file)
@@ -22,6 +22,8 @@ typedef struct procdata {
 #endif
        // TODO: will replace these in a later commit
        uintptr_t stack_pointers[MAX_NUM_CPUS];
+       /* glibc relies on stuff above this point.  if you change it, you need to
+        * rebuild glibc. */
        struct notif_method             notif_methods[MAX_NR_NOTIF];
        /* Long range, would like these to be mapped in lazily, as the vcores are
         * requested.  Sharing MAX_NUM_CPUS is a bit weird too. */
index 9aaee0b..b403f12 100644 (file)
@@ -5,19 +5,40 @@
 
 #include <ros/memlayout.h>
 #include <ros/common.h>
+#include <ros/arch/arch.h>
 
 #define PROCINFO_MAX_ARGP 32
 #define PROCINFO_ARGBUF_SIZE 3072
 
+// TODO: move me to an atomic header, and give me some support functions.
+#ifndef __TMP_SEQ_CTR
+#define __TMP_SEQ_CTR
+typedef uint8_t seq_ctr_t;
+#endif
+
+/* Not necessary to expose all of this, but it doesn't hurt, and is convenient
+ * for the kernel. */
+struct vcore {
+       uint32_t                        pcoreid;
+       bool                            valid;
+       bool                            preempt_served;
+       uint64_t                        preempt_pending;
+       struct trapframe        *tf_to_run;
+};
+
 typedef struct procinfo {
        pid_t pid;
        pid_t ppid;
        size_t max_harts;
        uint64_t tsc_freq;
        void* heap_bottom;
-
        char* argp[PROCINFO_MAX_ARGP];
        char argbuf[PROCINFO_ARGBUF_SIZE];
+       /* glibc relies on stuff above this point.  if you change it, you need to
+        * rebuild glibc. */
+       struct vcore            vcoremap[MAX_NUM_CPUS];
+       uint32_t                        num_vcores;
+       seq_ctr_t                       coremap_edit;
 } procinfo_t;
 #define PROCINFO_NUM_PAGES  ((sizeof(procinfo_t)-1)/PGSIZE + 1)        
 
index 40d4270..38a3190 100644 (file)
@@ -120,13 +120,13 @@ int load_elf(struct proc* p, struct file* f)
                // put auxp after argv, envp in procinfo
                int auxp_pos = -1;
                for(int i = 0, zeros = 0; i < PROCINFO_MAX_ARGP; i++)
-                       if(p->env_procinfo->argp[i] == NULL)
+                       if(p->procinfo->argp[i] == NULL)
                                if(++zeros == 2)
                                        auxp_pos = i+1;
                if(auxp_pos == -1 ||
                   auxp_pos+sizeof(auxp)/sizeof(char*) >= PROCINFO_MAX_ARGP)
                        return -1;
-               memcpy(p->env_procinfo->argp+auxp_pos,auxp,sizeof(auxp));
+               memcpy(p->procinfo->argp+auxp_pos,auxp,sizeof(auxp));
        }
 
        uintptr_t core0_entry = ei.dynamic ? interp_ei.entry : ei.entry;
@@ -142,7 +142,7 @@ int load_elf(struct proc* p, struct file* f)
        // Set the heap bottom and top to just past where the text 
        // region has been loaded
        p->heap_top = (void*)ei.highest_addr;
-       p->env_procinfo->heap_bottom = p->heap_top;
+       p->procinfo->heap_bottom = p->heap_top;
 
        return 0;
 }
index 81f0d81..3c6ca00 100644 (file)
@@ -38,7 +38,7 @@ atomic_t num_envs;
 //     -ENOMEM if page directory or table could not be allocated.
 //
 int env_setup_vm(env_t *e)
-WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
+WRITES(e->env_pgdir, e->env_cr3, e->procinfo, e->procdata)
 {
        int i, r;
        page_t *pgdir = NULL;
@@ -80,22 +80,22 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
        /* These need to be contiguous, so the kernel can alias them.  Note the
         * pages return with a refcnt, but it's okay to insert them since we free
         * them manually when the process is cleaned up. */
-       if (!(e->env_procinfo = get_cont_pages(LOG2_UP(PROCINFO_NUM_PAGES), 0)))
+       if (!(e->procinfo = get_cont_pages(LOG2_UP(PROCINFO_NUM_PAGES), 0)))
                goto env_setup_vm_error_i;
-       if (!(e->env_procdata = get_cont_pages(LOG2_UP(PROCDATA_NUM_PAGES), 0)))
+       if (!(e->procdata = get_cont_pages(LOG2_UP(PROCDATA_NUM_PAGES), 0)))
                goto env_setup_vm_error_d;
        for (int i = 0; i < PROCINFO_NUM_PAGES; i++) {
-               if (page_insert(e->env_pgdir, kva2page((void*)e->env_procinfo + i *
+               if (page_insert(e->env_pgdir, kva2page((void*)e->procinfo + i *
                                PGSIZE), (void*SNT)(UINFO + i*PGSIZE), PTE_USER_RO) < 0)
                        goto env_setup_vm_error;
        }
        for (int i = 0; i < PROCDATA_NUM_PAGES; i++) {
-               if (page_insert(e->env_pgdir, kva2page((void*)e->env_procdata + i *
+               if (page_insert(e->env_pgdir, kva2page((void*)e->procdata + i *
                                PGSIZE), (void*SNT)(UDATA + i*PGSIZE), PTE_USER_RW) < 0)
                        goto env_setup_vm_error;
        }
-       memset(e->env_procinfo, 0, sizeof(procinfo_t));
-       memset(e->env_procdata, 0, sizeof(procdata_t));
+       memset(e->procinfo, 0, sizeof(struct procinfo));
+       memset(e->procdata, 0, sizeof(struct procdata));
 
        /* Finally, set up the Global Shared Data page for all processes.
         * Can't be trusted, but still very useful at this stage for us.
@@ -118,9 +118,9 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
        return 0;
 
 env_setup_vm_error:
-       free_cont_pages(e->env_procdata, LOG2_UP(PROCDATA_NUM_PAGES));
+       free_cont_pages(e->procdata, LOG2_UP(PROCDATA_NUM_PAGES));
 env_setup_vm_error_d:
-       free_cont_pages(e->env_procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
+       free_cont_pages(e->procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
 env_setup_vm_error_i:
        page_decref(shared_page);
        env_user_mem_free(e, 0, UVPT);
@@ -254,7 +254,7 @@ void env_load_icode(env_t* e, env_t* binary_env, uint8_t* binary, size_t size)
        /* Load the binary and set the current locations of the elf segments.
         * All end-of-segment pointers are page aligned (invariant) */
        e->heap_top = load_icode(e, binary_env, binary, size);
-       e->env_procinfo->heap_bottom = e->heap_top;
+       e->procinfo->heap_bottom = e->heap_top;
 }
 
 #define PER_CPU_THING(type,name)\
index f2fe313..ee3c62d 100644 (file)
@@ -216,7 +216,7 @@ void manager_waterman()
 
                char* argv[] = {"/bin/sh","-l",0};
                char* envp[] = {"LD_LIBRARY_PATH=/lib",0};
-               procinfo_pack_args(p->env_procinfo,argv,envp);
+               procinfo_pack_args(p->procinfo,argv,envp);
 
                struct file* f = file_open("/bin/busybox",0,0);
                assert(f != NULL);
index a85e88d..969bb0d 100644 (file)
@@ -49,8 +49,9 @@ static void put_idle_core(uint32_t coreid)
 /* Other helpers, implemented later. */
 static uint32_t get_free_vcoreid(struct proc *SAFE p, uint32_t prev);
 static uint32_t get_busy_vcoreid(struct proc *SAFE p, uint32_t prev);
-static int32_t __get_vcoreid(int32_t *corelist, size_t num, int32_t pcoreid);
-static int32_t get_vcoreid(struct proc *SAFE p, int32_t pcoreid);
+static uint32_t __get_vcoreid(struct vcore *vcoremap, size_t num,
+                              uint32_t pcoreid);
+static uint32_t get_vcoreid(struct proc *SAFE p, uint32_t pcoreid);
 static inline void __wait_for_ipi(const char *fnname);
 
 /* PID management. */
@@ -168,7 +169,7 @@ struct proc *pid2proc(pid_t pid)
        return p;
 }
 
-/* Performs any intialization related to processes, such as create the proc
+/* Performs any initialization related to processes, such as create the proc
  * cache, prep the scheduler, etc.  When this returns, we should be ready to use
  * any process related function. */
 void proc_init(void)
@@ -201,11 +202,14 @@ void proc_init(void)
 void
 proc_init_procinfo(struct proc* p)
 {
-       p->env_procinfo->pid = p->pid;
-       p->env_procinfo->ppid = p->ppid;
-       p->env_procinfo->tsc_freq = system_timing.tsc_freq;
+       memset(&p->procinfo->vcoremap, 0, sizeof(p->procinfo->vcoremap));
+       p->procinfo->num_vcores = 0;
+       // TODO: change these too
+       p->procinfo->pid = p->pid;
+       p->procinfo->ppid = p->ppid;
+       p->procinfo->tsc_freq = system_timing.tsc_freq;
        // TODO: maybe do something smarter here
-       p->env_procinfo->max_harts = MAX(1,num_cpus-1);
+       p->procinfo->max_harts = MAX(1,num_cpus-1);
 }
 
 /* Allocates and initializes a process, with the given parent.  Currently
@@ -250,30 +254,28 @@ static error_t proc_alloc(struct proc *SAFE*SAFE pp, pid_t parent_id)
        p->env_refcnt = 2; // one for the object, one for the ref we pass back
        p->env_flags = 0;
        p->env_entry = 0; // cheating.  this really gets set in load_icode
-       p->num_vcores = 0;
-       p->env_procinfo->heap_bottom = (void*)UTEXT;
+       p->procinfo->heap_bottom = (void*)UTEXT;
        p->heap_top = (void*)UTEXT;
-       memset(&p->vcoremap, -1, sizeof(p->vcoremap));
        memset(&p->resources, 0, sizeof(p->resources));
        memset(&p->env_ancillary_state, 0, sizeof(p->env_ancillary_state));
        memset(&p->env_tf, 0, sizeof(p->env_tf));
 
-       /* Initialize the contents of the e->env_procinfo structure */
+       /* Initialize the contents of the e->procinfo structure */
        proc_init_procinfo(p);
-       /* Initialize the contents of the e->env_procdata structure */
+       /* Initialize the contents of the e->procdata structure */
 
        /* Initialize the generic syscall ring buffer */
-       SHARED_RING_INIT(&p->env_procdata->syscallring);
+       SHARED_RING_INIT(&p->procdata->syscallring);
        /* Initialize the backend of the syscall ring buffer */
        BACK_RING_INIT(&p->syscallbackring,
-                      &p->env_procdata->syscallring,
+                      &p->procdata->syscallring,
                       SYSCALLRINGSIZE);
 
        /* Initialize the generic sysevent ring buffer */
-       SHARED_RING_INIT(&p->env_procdata->syseventring);
+       SHARED_RING_INIT(&p->procdata->syseventring);
        /* Initialize the frontend of the sysevent ring buffer */
        FRONT_RING_INIT(&p->syseventfrontring,
-                       &p->env_procdata->syseventring,
+                       &p->procdata->syseventring,
                        SYSEVENTRINGSIZE);
        *pp = p;
        atomic_inc(&num_envs);
@@ -325,8 +327,8 @@ static void __proc_free(struct proc *p)
        // Flush all mapped pages in the user portion of the address space
        env_user_mem_free(p, 0, UVPT);
        /* These need to be free again, since they were allocated with a refcnt. */
-       free_cont_pages(p->env_procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
-       free_cont_pages(p->env_procdata, LOG2_UP(PROCDATA_NUM_PAGES));
+       free_cont_pages(p->procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
+       free_cont_pages(p->procdata, LOG2_UP(PROCDATA_NUM_PAGES));
 
        env_pagetable_free(p);
        p->env_pgdir = 0;
@@ -385,8 +387,10 @@ void proc_run(struct proc *p)
                         * env_tf.
                         * We may need the pcoremap entry to mark it as a RUNNING_S core, or
                         * else update it here. (TODO) (PCORE) */
-                       p->num_vcores = 0;
-                       p->vcoremap[0] = core_id();
+                       // TODO: (VSEQ) signal these vcore changes
+                       p->procinfo->num_vcores = 0;
+                       p->procinfo->vcoremap[0].pcoreid = core_id();
+                       p->procinfo->vcoremap[0].valid = TRUE; // sort of.  this needs work.
                        spin_unlock_irqsave(&p->proc_lock);
                        /* Transferring our reference to startcore, where p will become
                         * current.  If it already is, decref in advance.  This is similar
@@ -399,37 +403,42 @@ void proc_run(struct proc *p)
                        /* vcoremap[i] holds the coreid of the physical core allocated to
                         * this process.  It is set outside proc_run.  For the active
                         * message, a0 = struct proc*, a1 = struct trapframe*.   */
-                       if (p->num_vcores) {
+                       if (p->procinfo->num_vcores) {
                                __proc_set_state(p, PROC_RUNNING_M);
                                int i = 0;
                                /* Up the refcnt, since num_vcores are going to start using this
                                 * process and have it loaded in their 'current'. */
-                               p->env_refcnt += p->num_vcores; // TODO: (REF) use incref
+                               p->env_refcnt += p->procinfo->num_vcores; // TODO: (REF) use incref
                                /* If the core we are running on is in the vcoremap, we will get
                                 * an IPI (once we reenable interrupts) and never return. */
-                               if (__get_vcoreid(p->vcoremap, p->num_vcores, core_id()) != -1)
+                               if (__get_vcoreid(p->procinfo->vcoremap,
+                                                 p->procinfo->num_vcores, core_id()) != -1)
                                        self_ipi_pending = TRUE;
                                // TODO: handle silly state (HSS)
                                // set virtual core 0 to run the main context on transition
                                if (p->env_flags & PROC_TRANSITION_TO_M) {
                                        p->env_flags &= !PROC_TRANSITION_TO_M;
 #ifdef __IVY__
-                                       send_active_message(p->vcoremap[0], __startcore, p,
+                                       send_active_message(p->procinfo->vcoremap[0].pcoreid,
+                                                           __startcore, p,
                                                            &p->env_tf, (void *SNT)0);
 #else
-                                       send_active_message(p->vcoremap[0], (void *)__startcore,
-                                                           (void *)p, (void *)&p->env_tf, 0);
+                                       send_active_message(p->procinfo->vcoremap[0].pcoreid,
+                                                           (void *)__startcore, (void *)p,
+                                                                               (void *)&p->env_tf, 0);
 #endif
                                        i = 1; // start at vcore1 in the loop below
                                }
                                /* handle the others. */
-                               for (/* i set above */; i < p->num_vcores; i++)
+                               for (/* i set above */; i < p->procinfo->num_vcores; i++)
 #ifdef __IVY__
-                                       send_active_message(p->vcoremap[i], __startcore,
-                                                           p, (trapframe_t *CT(1))NULL, (void *SNT)i);
+                                       send_active_message(p->procinfo->vcoremap[i].pcoreid,
+                                                           __startcore, p,
+                                                                               (trapframe_t *CT(1))NULL, (void *SNT)i);
 #else
-                                       send_active_message(p->vcoremap[i], (void *)__startcore,
-                                                           (void *)p, (void *)0, (void *)i);
+                                       send_active_message(p->procinfo->vcoremap[i].pcoreid,
+                                                           (void *)__startcore, (void *)p,
+                                                                               (void *)0, (void *)i);
 #endif
                        } else {
                                warn("Tried to proc_run() an _M with no vcores!");
@@ -552,9 +561,6 @@ void proc_startcore(struct proc *p, trapframe_t *tf) {
  * needs to change anyways when we make __death more like __preempt.  (TODO) */
 void proc_destroy(struct proc *p)
 {
-       /* TODO: this corelist is taking up a lot of space on the stack */
-       uint32_t corelist[MAX_NUM_CPUS];
-       size_t num_cores_freed;
        bool self_ipi_pending = FALSE;
        spin_lock_irqsave(&p->proc_lock);
 
@@ -584,12 +590,12 @@ void proc_destroy(struct proc *p)
                                current = NULL;
                        }
                        #endif
-                       send_active_message(p->vcoremap[0], __death, (void *SNT)0,
-                                           (void *SNT)0, (void *SNT)0);
+                       send_active_message(p->procinfo->vcoremap[0].pcoreid, __death,
+                                          (void *SNT)0, (void *SNT)0, (void *SNT)0);
                        #if 0
                        /* right now, RUNNING_S only runs on a mgmt core (0), not cores
                         * managed by the idlecoremap.  so don't do this yet. */
-                       put_idle_core(p->vcoremap[0]);
+                       put_idle_core(p->procinfo->vcoremap[0].pcoreid);
                        #endif
                        break;
                case PROC_RUNNING_M:
@@ -617,13 +623,13 @@ void proc_destroy(struct proc *p)
 }
 
 /* Helper function.  Starting from prev, it will find the next free vcoreid,
- * which is the next slot with a -1 in it.
+ * which is the next vcore that is not valid.
  * You better hold the lock before calling this. */
 static uint32_t get_free_vcoreid(struct proc *SAFE p, uint32_t prev)
 {
        uint32_t i;
        for (i = prev; i < MAX_NUM_CPUS; i++)
-               if (p->vcoremap[i] == -1)
+               if (!p->procinfo->vcoremap[i].valid)
                        break;
        if (i + 1 >= MAX_NUM_CPUS)
                warn("At the end of the vcorelist.  Might want to check that out.");
@@ -631,13 +637,13 @@ static uint32_t get_free_vcoreid(struct proc *SAFE p, uint32_t prev)
 }
 
 /* Helper function.  Starting from prev, it will find the next busy vcoreid,
- * which is the next slot with something other than a -1 in it.
+ * which is the next vcore that is valid.
  * You better hold the lock before calling this. */
 static uint32_t get_busy_vcoreid(struct proc *SAFE p, uint32_t prev)
 {
        uint32_t i;
        for (i = prev; i < MAX_NUM_CPUS; i++)
-               if (p->vcoremap[i] != -1)
+               if (p->procinfo->vcoremap[i].valid)
                        break;
        if (i + 1 >= MAX_NUM_CPUS)
                warn("At the end of the vcorelist.  Might want to check that out.");
@@ -647,12 +653,13 @@ static uint32_t get_busy_vcoreid(struct proc *SAFE p, uint32_t prev)
 /* Helper function.  Find the vcoreid for a given physical core id.  If we use
  * some sort of pcoremap, we can avoid this linear search.  You better hold the
  * lock before calling this.  Returns -1 on failure. */
-static int32_t __get_vcoreid(int32_t *corelist, size_t num, int32_t pcoreid)
+static uint32_t __get_vcoreid(struct vcore *vcoremap, size_t num,
+                              uint32_t pcoreid)
 {
-       int32_t i;
+       uint32_t i;
        bool found = FALSE;
        for (i = 0; i < num; i++)
-               if (corelist[i] == pcoreid) {
+               if (vcoremap[i].pcoreid == pcoreid) {
                        found = TRUE;
                        break;
                }
@@ -664,10 +671,11 @@ static int32_t __get_vcoreid(int32_t *corelist, size_t num, int32_t pcoreid)
 
 /* Helper function.  Just like the one above, but this one panics on failure.
  * You better hold the lock before calling this.  */
-static int32_t get_vcoreid(struct proc *SAFE p, int32_t pcoreid)
+static uint32_t get_vcoreid(struct proc *SAFE p, uint32_t pcoreid)
 {
-       int32_t vcoreid = __get_vcoreid(p->vcoremap, p->num_vcores, pcoreid);
-       assert(vcoreid != -1);
+       uint32_t vcoreid = __get_vcoreid(p->procinfo->vcoremap,
+                                        p->procinfo->num_vcores, pcoreid);
+       assert(p->procinfo->vcoremap[vcoreid].valid);
        return vcoreid;
 }
 
@@ -707,14 +715,15 @@ void proc_yield(struct proc *SAFE p)
                        schedule_proc(p);
                        break;
                case (PROC_RUNNING_M):
+                       // TODO: (VSEQ) signal these vcore changes
                        // give up core
-                       p->vcoremap[get_vcoreid(p, core_id())] = -1;
-                       p->resources[RES_CORES].amt_granted = --(p->num_vcores);
-                       p->resources[RES_CORES].amt_wanted = p->num_vcores;
+                       p->procinfo->vcoremap[get_vcoreid(p, core_id())].valid = FALSE;
+                       p->resources[RES_CORES].amt_granted = --(p->procinfo->num_vcores);
+                       p->resources[RES_CORES].amt_wanted = p->procinfo->num_vcores;
                        // add to idle list
                        put_idle_core(core_id());
                        // last vcore?  then we really want 1, and to yield the gang
-                       if (p->num_vcores == 0) {
+                       if (p->procinfo->num_vcores == 0) {
                                // might replace this with m_yield, if we have it directly
                                p->resources[RES_CORES].amt_wanted = 1;
                                __proc_set_state(p, PROC_RUNNABLE_M);
@@ -732,7 +741,7 @@ void proc_yield(struct proc *SAFE p)
        abandon_core();
 }
 
-/* Gives process p the additional num cores listed in corelist.  You must be
+/* Gives process p the additional num cores listed in pcorelist.  You must be
  * RUNNABLE_M or RUNNING_M before calling this.  If you're RUNNING_M, this will
  * startup your new cores at the entry point with their virtual IDs.  If you're
  * RUNNABLE_M, you should call proc_run after this so that the process can start
@@ -752,7 +761,7 @@ void proc_yield(struct proc *SAFE p)
  * once you unlock after this function.
  *
  * WARNING: You must hold the proc_lock before calling this! */
-bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num)
+bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num)
 { TRUSTEDBLOCK
        bool self_ipi_pending = FALSE;
        uint32_t free_vcoreid = 0;
@@ -767,21 +776,24 @@ bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num)
                case (PROC_RUNNABLE_M):
                        // set up vcoremap.  list should be empty, but could be called
                        // multiple times before proc_running (someone changed their mind?)
-                       if (p->num_vcores) {
+                       if (p->procinfo->num_vcores) {
                                printk("[kernel] Yaaaaaarrrrr!  Giving extra cores, are we?\n");
                                // debugging: if we aren't packed, then there's a problem
                                // somewhere, like someone forgot to take vcores after
                                // preempting.
-                               for (int i = 0; i < p->num_vcores; i++)
-                                       assert(p->vcoremap[i]);
+                               for (int i = 0; i < p->procinfo->num_vcores; i++)
+                                       assert(p->procinfo->vcoremap[i].valid);
                        }
+                       // TODO: (VSEQ) signal these vcore changes
                        // add new items to the vcoremap
                        for (int i = 0; i < num; i++) {
                                // find the next free slot, which should be the next one
                                free_vcoreid = get_free_vcoreid(p, free_vcoreid);
-                               printd("setting vcore %d to pcore %d\n", free_vcoreid, corelist[i]);
-                               p->vcoremap[free_vcoreid] = corelist[i];
-                               p->num_vcores++;
+                               printd("setting vcore %d to pcore %d\n", free_vcoreid,
+                                      pcorelist[i]);
+                               p->procinfo->vcoremap[free_vcoreid].pcoreid = pcorelist[i];
+                               p->procinfo->vcoremap[free_vcoreid].valid = TRUE;
+                               p->procinfo->num_vcores++;
                        }
                        break;
                case (PROC_RUNNING_M):
@@ -789,16 +801,19 @@ bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num)
                         * process and have it loaded in their 'current'. */
                        // TODO: (REF) use proc_incref once we have atomics
                        p->env_refcnt += num;
-                       if (__get_vcoreid(corelist, num, core_id()) != -1)
-                               self_ipi_pending = TRUE;
+                       // TODO: (VSEQ) signal these vcore changes
                        for (int i = 0; i < num; i++) {
                                free_vcoreid = get_free_vcoreid(p, free_vcoreid);
-                               printd("setting vcore %d to pcore %d\n", free_vcoreid, corelist[i]);
-                               p->vcoremap[free_vcoreid] = corelist[i];
-                               p->num_vcores++;
-                               send_active_message(corelist[i], __startcore, p,
-                                                   (struct Trapframe *)0,
+                               printd("setting vcore %d to pcore %d\n", free_vcoreid,
+                                      pcorelist[i]);
+                               p->procinfo->vcoremap[free_vcoreid].pcoreid = pcorelist[i];
+                               p->procinfo->vcoremap[free_vcoreid].valid = TRUE;
+                               p->procinfo->num_vcores++;
+                               send_active_message(pcorelist[i], __startcore, p,
+                                                   (struct trapframe *)0,
                                                    (void*SNT)free_vcoreid);
+                               if (pcorelist[i] == core_id())
+                                       self_ipi_pending = TRUE;
                        }
                        break;
                default:
@@ -807,7 +822,7 @@ bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num)
        return self_ipi_pending;
 }
 
-/* Makes process p's coremap look like corelist (add, remove, etc).  Caller
+/* Makes process p's coremap look like pcorelist (add, remove, etc).  Caller
  * needs to know what cores are free after this call (removed, failed, etc).
  * This info will be returned via corelist and *num.  This will send message to
  * any cores that are getting removed.
@@ -818,19 +833,19 @@ bool __proc_give_cores(struct proc *SAFE p, int32_t *corelist, size_t num)
  * implementing it.
  *
  * WARNING: You must hold the proc_lock before calling this! */
-bool __proc_set_allcores(struct proc *SAFE p, int32_t *corelist,
+bool __proc_set_allcores(struct proc *SAFE p, uint32_t *pcorelist,
                          size_t *num, amr_t message,TV(a0t) arg0,
                          TV(a1t) arg1, TV(a2t) arg2)
 {
        panic("Set all cores not implemented.\n");
 }
 
-/* Takes from process p the num cores listed in corelist, using the given
+/* Takes from process p the num cores listed in pcorelist, using the given
  * message for the active message (__death, __preempt, etc).  Like the others
  * in this function group, bool signals whether or not an IPI is pending.
  *
  * WARNING: You must hold the proc_lock before calling this! */
-bool __proc_take_cores(struct proc *SAFE p, int32_t *corelist,
+bool __proc_take_cores(struct proc *SAFE p, uint32_t *pcorelist,
                        size_t num, amr_t message, TV(a0t) arg0,
                        TV(a1t) arg1, TV(a2t) arg2)
 { TRUSTEDBLOCK
@@ -847,21 +862,23 @@ bool __proc_take_cores(struct proc *SAFE p, int32_t *corelist,
                        panic("Weird state %d in proc_take_cores()!\n", p->state);
        }
        spin_lock(&idle_lock);
-       assert((num <= p->num_vcores) && (num_idlecores + num <= num_cpus));
+       assert((num <= p->procinfo->num_vcores) &&
+              (num_idlecores + num <= num_cpus));
        spin_unlock(&idle_lock);
+       // TODO: (VSEQ) signal these vcore changes
        for (int i = 0; i < num; i++) {
-               vcoreid = get_vcoreid(p, corelist[i]);
-               assert(p->vcoremap[vcoreid] == corelist[i]);
+               vcoreid = get_vcoreid(p, pcorelist[i]);
+               assert(p->procinfo->vcoremap[vcoreid].pcoreid == pcorelist[i]);
                if (message) {
-                       if (p->vcoremap[vcoreid] == core_id())
+                       if (p->procinfo->vcoremap[vcoreid].pcoreid == core_id())
                                self_ipi_pending = TRUE;
-                       send_active_message(corelist[i], message, arg0, arg1, arg2);
+                       send_active_message(pcorelist[i], message, arg0, arg1, arg2);
                }
                // give the pcore back to the idlecoremap
-               put_idle_core(corelist[i]);
-               p->vcoremap[vcoreid] = -1;
+               p->procinfo->vcoremap[vcoreid].valid = FALSE;
+               put_idle_core(pcorelist[i]);
        }
-       p->num_vcores -= num;
+       p->procinfo->num_vcores -= num;
        p->resources[RES_CORES].amt_granted -= num;
        return self_ipi_pending;
 }
@@ -888,22 +905,23 @@ bool __proc_take_allcores(struct proc *SAFE p, amr_t message,
                        panic("Weird state %d in proc_take_allcores()!\n", p->state);
        }
        spin_lock(&idle_lock);
-       assert(num_idlecores + p->num_vcores <= num_cpus); // sanity
+       assert(num_idlecores + p->procinfo->num_vcores <= num_cpus); // sanity
        spin_unlock(&idle_lock);
-       for (int i = 0; i < p->num_vcores; i++) {
+       // TODO: (VSEQ) signal these vcore changes
+       for (int i = 0; i < p->procinfo->num_vcores; i++) {
                // find next active vcore
                active_vcoreid = get_busy_vcoreid(p, active_vcoreid);
                if (message) {
-                       if (p->vcoremap[active_vcoreid] == core_id())
+                       if (p->procinfo->vcoremap[active_vcoreid].pcoreid == core_id())
                                self_ipi_pending = TRUE;
-                       send_active_message(p->vcoremap[active_vcoreid], message,
-                                            arg0, arg1, arg2);
+                       send_active_message(p->procinfo->vcoremap[active_vcoreid].pcoreid,
+                                           message, arg0, arg1, arg2);
                }
                // give the pcore back to the idlecoremap
-               put_idle_core(p->vcoremap[active_vcoreid]);
-               p->vcoremap[active_vcoreid] = -1;
+               p->procinfo->vcoremap[active_vcoreid].valid = FALSE;
+               put_idle_core(p->procinfo->vcoremap[active_vcoreid].pcoreid);
        }
-       p->num_vcores = 0;
+       p->procinfo->num_vcores = 0;
        p->resources[RES_CORES].amt_granted = 0;
        return self_ipi_pending;
 }
@@ -985,7 +1003,7 @@ void __startcore(trapframe_t *tf, uint32_t srcid, void * a0, void * a1,
                tf_to_pop = &local_tf;
                memset(tf_to_pop, 0, sizeof(*tf_to_pop));
                proc_init_trapframe(tf_to_pop, vcoreid, p_to_run->env_entry,
-                                   p_to_run->env_procdata->stack_pointers[vcoreid]);
+                                   p_to_run->procdata->stack_pointers[vcoreid]);
        }
        /* the sender of the amsg increfed, thinking we weren't running current. */
        if (p_to_run == current)
@@ -1030,8 +1048,9 @@ void print_allpids(void)
                printk("PID      STATE    \n");
                printk("------------------\n");
                do {
-                       struct proc* p = hashtable_iterator_value(phtable_i);
-                       printk("%8d %s\n", hashtable_iterator_key(phtable_i),p ? procstate2str(p->state) : "(null)");
+                       struct proc *p = hashtable_iterator_value(phtable_i);
+                       printk("%8d %s\n", hashtable_iterator_key(phtable_i),
+                              p ? procstate2str(p->state) : "(null)");
                } while (hashtable_iterator_advance(phtable_i));
        }
        spin_unlock(&pid_hash_lock);
@@ -1055,11 +1074,11 @@ void print_proc_info(pid_t pid)
        printk("Refcnt: %d\n", p->env_refcnt - 1); // don't report our ref
        printk("Flags: 0x%08x\n", p->env_flags);
        printk("CR3(phys): 0x%08x\n", p->env_cr3);
-       printk("Num Vcores: %d\n", p->num_vcores);
+       printk("Num Vcores: %d\n", p->procinfo->num_vcores);
        printk("Vcoremap:\n");
-       for (int i = 0; i < p->num_vcores; i++) {
+       for (int i = 0; i < p->procinfo->num_vcores; i++) {
                j = get_busy_vcoreid(p, j);
-               printk("\tVcore %d: Pcore %d\n", j, p->vcoremap[j]);
+               printk("\tVcore %d: Pcore %d\n", j, p->procinfo->vcoremap[j].pcoreid);
                j++;
        }
        printk("Resources:\n");
index 82db0a4..8b6fa21 100644 (file)
@@ -38,7 +38,7 @@ ssize_t core_request(struct proc *p)
 {
        size_t num_granted;
        ssize_t amt_new;
-       int32_t corelist[MAX_NUM_CPUS];
+       uint32_t corelist[MAX_NUM_CPUS];
        bool need_to_idle = FALSE;
        bool self_ipi_pending = FALSE;
 
@@ -97,7 +97,8 @@ ssize_t core_request(struct proc *p)
                        case (PROC_RUNNING_S):
                                // issue with if we're async or not (need to preempt it)
                                // either of these should trip it. TODO: (ACR) async core req
-                               if ((current != p) || (p->vcoremap[0] != core_id()))
+                               // TODO: relies on vcore0 being the caller (VC#)
+                               if ((current != p) || (p->procinfo->vcoremap[0].pcoreid != core_id()))
                                        panic("We don't handle async RUNNING_S core requests yet.");
                                /* save the tf to be restarted on another core (in proc_run) */
                                p->env_tf = *current_tf;
@@ -111,7 +112,8 @@ ssize_t core_request(struct proc *p)
                                 * syscall). */
                                /* this process no longer runs on its old location (which is
                                 * this core, for now, since we don't handle async calls) */
-                               p->vcoremap[0] = -1;
+                               // TODO: (VSEQ) signal these vcore changes
+                               p->procinfo->vcoremap[0].valid = FALSE;
                                // will need to give up this core / idle later (sync)
                                need_to_idle = TRUE;
                                // change to runnable_m (it's TF is already saved)
index aedfb81..ac1010c 100644 (file)
@@ -167,8 +167,8 @@ static size_t sys_getvcoreid(env_t* e)
                return 0;
 
        size_t i;
-       for(i = 0; i < e->num_vcores; i++)
-               if(core_id() == e->vcoremap[i])
+       for(i = 0; i < e->procinfo->num_vcores; i++)
+               if(core_id() == e->procinfo->vcoremap[i].pcoreid)
                        return i;
 
        panic("virtual core id not found in sys_getvcoreid()!");
@@ -285,7 +285,9 @@ static ssize_t sys_run_binary(env_t* e, void *DANGEROUS binary_buf, size_t len,
        env_t* env = proc_create(NULL,0);
        assert(env != NULL);
 
-       if(memcpy_from_user(e,e->env_procinfo,procinfo,sizeof(*procinfo)))
+       // let me know if you use this.  we need to sort process creation better.
+       printk("sys_run_binary() is deprecated.  Use at your own risk.");
+       if(memcpy_from_user(e,e->procinfo,procinfo,sizeof(*procinfo)))
                return -1;
        proc_init_procinfo(e);
 
@@ -358,11 +360,12 @@ static ssize_t sys_fork(env_t* e)
                return 0;
        }
 
+       // TODO: (PC) this won't work.  Needs revisiting.
        // copy procdata and procinfo
-       memcpy(env->env_procdata,e->env_procdata,sizeof(struct procdata));
-       memcpy(env->env_procinfo,e->env_procinfo,sizeof(struct procinfo));
-       env->env_procinfo->pid = env->pid;
-       env->env_procinfo->ppid = env->ppid;
+       memcpy(env->procdata,e->procdata,sizeof(struct procdata));
+       memcpy(env->procinfo,e->procinfo,sizeof(struct procinfo));
+       env->procinfo->pid = env->pid;
+       env->procinfo->ppid = env->ppid;
 
        // copy all memory below procdata
        if(env_user_mem_walk(e,0,UDATA,&copy_page,env))
@@ -395,12 +398,14 @@ intreg_t sys_exec(struct proc* p, int fd, procinfo_t* pi)
                goto out;
        }
 
-       if(memcpy_from_user(p,p->env_procinfo,pi,sizeof(procinfo_t))) {
+       // TODO: don't copy procinfo from the user (PC)
+       if(memcpy_from_user(p,p->procinfo,pi,sizeof(procinfo_t))) {
                proc_destroy(p);
                goto out;
        }
        proc_init_procinfo(p);
-       memset(p->env_procdata, 0, sizeof(procdata_t));
+       // TODO: don't do this either (PC)
+       memset(p->procdata, 0, sizeof(procdata_t));
 
        env_user_mem_free(p,0,USTACKTOP);
 
@@ -489,7 +494,7 @@ static void* sys_brk(struct proc *p, void* addr) {
 
        spin_lock_irqsave(&p->proc_lock);
 
-       if((addr < p->env_procinfo->heap_bottom) || (addr >= (void*)BRK_END))
+       if((addr < p->procinfo->heap_bottom) || (addr >= (void*)BRK_END))
                goto out;
 
        uintptr_t real_heap_top = ROUNDUP((uintptr_t)p->heap_top,PGSIZE);