atomic_init(&num_envs, 0);
}
+/* Populate p->env_procinfo from the proc struct: identity (pid, ppid),
+ * the TSC frequency taken from system_timing, and the hart (core) limit.
+ * NOTE(review): env_procinfo is presumably the per-process info page
+ * shared with userspace — confirm against the struct's declaration.
+ * Called during proc allocation, after pid/ppid are assigned. */
+static void
+proc_init_procinfo(struct proc* p)
+{
+ p->env_procinfo->pid = p->pid;
+ p->env_procinfo->ppid = p->ppid;
+ p->env_procinfo->tsc_freq = system_timing.tsc_freq;
+ // TODO: maybe do something smarter here
+ // Advertise every physical core as usable; MAX(1, ...) guards the
+ // degenerate case where num_cpus is 0 (stated as a hack by the author).
+ p->env_procinfo->max_harts = MAX(1,num_cpus); // hack to use all cores
+}
+
/* Allocates and initializes a process, with the given parent. Currently
* writes the *p into **pp, and returns 0 on success, < 0 for an error.
* Errors include:
if (!(p = kmem_cache_alloc(proc_cache, 0)))
return -ENOMEM;
- { INITSTRUCT(*p)
+ { INITSTRUCT(*p)
+
+ // Setup the default map of where to get cache colors from
+ p->cache_colors_map = global_cache_colors_map;
+ p->next_cache_color = 0;
/* Initialize the address space */
if ((r = env_setup_vm(p)) < 0) {
spin_unlock(&pid_hash_lock);
/* Set the basic status variables. */
- spinlock_init(&p->proc_lock);
+ spinlock_init(&p->proc_lock);
+ p->exitcode = 0;
p->ppid = parent_id;
- __proc_set_state(p, PROC_CREATED);
+ p->state = PROC_CREATED; // shouldn't go through state machine for init
p->env_refcnt = 2; // one for the object, one for the ref we pass back
p->env_flags = 0;
p->env_entry = 0; // cheating. this really gets set in load_icode
p->num_vcores = 0;
+ p->heap_bottom = (void*)UTEXT;
+ p->heap_top = (void*)UTEXT;
memset(&p->vcoremap, -1, sizeof(p->vcoremap));
memset(&p->resources, 0, sizeof(p->resources));
memset(&p->env_ancillary_state, 0, sizeof(p->env_ancillary_state));
memset(&p->env_tf, 0, sizeof(p->env_tf));
- proc_init_trapframe(&p->env_tf);
+ proc_init_trapframe(&p->env_tf,0);
/* Initialize the contents of the e->env_procinfo structure */
- p->env_procinfo->pid = p->pid;
+ proc_init_procinfo(p);
/* Initialize the contents of the e->env_procdata structure */
/* Initialize the generic syscall ring buffer */
*pp = p;
atomic_inc(&num_envs);
- printk("[%08x] new process %08x\n", current ? current->pid : 0, p->pid);
+ proc_init_arch(p);
+
+ printd("[%08x] new process %08x\n", current ? current->pid : 0, p->pid);
} // INIT_STRUCT
return 0;
}
curid = (current ? current->pid : 0);
if ((r = proc_alloc(&p, curid)) < 0)
panic("proc_create: %e", r); // one of 3 quaint usages of %e.
- load_icode(p, binary, size);
+ if(binary != NULL)
+ env_load_icode(p, NULL, binary, size);
return p;
}
{
physaddr_t pa;
- printk("[PID %d] freeing proc: %d\n", current ? current->pid : 0, p->pid);
+ printd("[PID %d] freeing proc: %d\n", current ? current->pid : 0, p->pid);
// All parts of the kernel should have decref'd before __proc_free is called
assert(p->env_refcnt == 0);
+ proc_free_arch(p);
+
+ // Free any colors allocated to this process
+ if(p->cache_colors_map != global_cache_colors_map) {
+ for(int i=0; i<llc_cache->num_colors; i++)
+ cache_color_free(llc_cache, p->cache_colors_map);
+ cache_colors_map_free(p->cache_colors_map);
+ }
+
// Flush all mapped pages in the user portion of the address space
env_user_mem_free(p);
trapframe_t local_tf;
trapframe_t *tf_to_pop = (trapframe_t *CT(1))a1;
- printk("[kernel] Startcore on physical core %d\n", coreid);
+ printk("[kernel] Startcore on physical core %d for Process %d\n",
+ coreid, p_to_run->pid);
assert(p_to_run);
// TODO: handle silly state (HSS)
if (!tf_to_pop) {
tf_to_pop = &local_tf;
memset(tf_to_pop, 0, sizeof(*tf_to_pop));
- proc_init_trapframe(tf_to_pop);
+ proc_init_trapframe(tf_to_pop,(uint32_t)a2);
// Note the init_tf sets tf_to_pop->tf_esp = USTACKTOP;
- proc_set_tfcoreid(tf_to_pop, (uint32_t)a2);
proc_set_program_counter(tf_to_pop, p_to_run->env_entry);
}
/* the sender of the amsg increfed, thinking we weren't running current. */
spin_lock(&pid_hash_lock);
if (hashtable_count(pid_hash)) {
hashtable_itr_t *phtable_i = hashtable_iterator(pid_hash);
+ printk("PID STATE \n");
+ printk("------------------\n");
do {
- printk("PID: %d\n", hashtable_iterator_key(phtable_i));
+ struct proc* p = hashtable_iterator_value(phtable_i);
+ printk("%8d %s\n", hashtable_iterator_key(phtable_i),p ? procstate2str(p->state) : "(null)");
} while (hashtable_iterator_advance(phtable_i));
}
spin_unlock(&pid_hash_lock);