/* See COPYRIGHT for copyright information. */

#include <ros/syscall.h>
#include <ros/error.h>
env_t *envs = NULL;		// All environments

// TODO: make this a struct of info including the pointer and cacheline-align it
// This lets the kernel know what process is running on the core it traps into.
// A lot of the Env business, including this and its usage, will change when we
// redesign the env as a proper multi-core process.
env_t* (RO curenvs)[MAX_NUM_CPUS] = {[0 ... (MAX_NUM_CPUS-1)] NULL};
#define ENVGENSHIFT	12		// >= LOGNENV
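/* Illustrative sketch (not in the original file): env_alloc() below builds an
 * env_id as (generation | array index), so the generation bits must sit above
 * the index bits.  Assuming NENV is visible at this point and is a power of
 * two, this negative-array-size trick turns a too-small ENVGENSHIFT into a
 * compile error rather than a run-time id collision. */
typedef char envgenshift_covers_nenv[(1 << ENVGENSHIFT) >= NENV ? 1 : -1];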

//
// Converts an envid to an env pointer.
//
// Returns 0 on success, -EBADENV on error.
// On success, sets *env_store to the environment.
// On error, sets *env_store to NULL.
//
int
envid2env(envid_t envid, env_t **env_store, bool checkperm)
{
	env_t *e;

	// If envid is zero, return the current environment.
	if (envid == 0) {
		*env_store = current;
		return 0;
	}

	// Look up the Env structure via the index part of the envid,
	// then check the env_id field in that env_t
	// to ensure that the envid is not stale
	// (i.e., does not refer to a _previous_ environment
	// that used the same slot in the envs[] array).
	e = &envs[ENVX(envid)];
	if (e->state == ENV_FREE || e->env_id != envid) {
		*env_store = NULL;
		return -EBADENV;
	}

	// Check that the calling environment has legitimate permission
	// to manipulate the specified environment.
	// If checkperm is set, the specified environment
	// must be either the current environment
	// or an immediate child of the current environment.
	// TODO: should check for current being null
	if (checkperm && e != current && e->env_parent_id != current->env_id) {
		*env_store = NULL;
		return -EBADENV;
	}

	*env_store = e;
	return 0;
}

//
// Mark all environments in 'envs' as free, set their env_ids to 0,
// and insert them into the proc_freelist.
// Insert in reverse order, so that the first call to env_alloc()
// returns envs[0].
// TODO: get rid of this whole array bullshit
void
env_init(void)
{
	int i;

	// core 0 is not idle, all others are (for now)
	spin_lock(&idle_lock);
	num_idlecores = num_cpus - 1;
	for (i = 0; i < num_idlecores; i++)
		idlecoremap[i] = i + 1;
	spin_unlock(&idle_lock);

	atomic_init(&num_envs, 0);
	TAILQ_INIT(&proc_freelist);
	assert(envs != NULL);
	for (i = NENV-1; i >= 0; i--) {
		// these should already be set from when the array was memset to 0
		envs[i].state = ENV_FREE;
		envs[i].env_id = 0;
		TAILQ_INSERT_HEAD(&proc_freelist, &envs[i], proc_link);
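		/* Sketch-only check (not in the original): head insertion in this
		 * reverse-order loop leaves envs[i] at the front, so once the loop
		 * finishes, the first env_alloc() will hand out envs[0]. */
		assert(TAILQ_FIRST(&proc_freelist) == &envs[i]);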
	}
}

//
// Initialize the kernel virtual memory layout for environment e.
// Allocate a page directory, set e->env_pgdir and e->env_cr3 accordingly,
// and initialize the kernel portion of the new environment's address space.
// Do NOT (yet) map anything into the user portion
// of the environment's virtual address space.
//
// Returns 0 on success, < 0 on error.  Errors include:
//	-ENOMEM if page directory or table could not be allocated.
//
static int
env_setup_vm(env_t *e)
WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
{
	int r;
	page_t *pgdir = NULL;
	page_t *pginfo[PROCINFO_NUM_PAGES] = {NULL};
	page_t *pgdata[PROCDATA_NUM_PAGES] = {NULL};
	static page_t * RO shared_page = 0;

	/*
	 * First, allocate a page for the pgdir of this process and up
	 * its reference count since this will never be done elsewhere.
	 */
	r = page_alloc(&pgdir);

	/*
	 * Next, set up the e->env_pgdir and e->env_cr3 pointers to point
	 * to this newly allocated page and clear its contents.
	 */
	memset(page2kva(pgdir), 0, PGSIZE);
	e->env_pgdir = (pde_t *COUNT(NPDENTRIES)) TC(page2kva(pgdir));
	e->env_cr3 = (physaddr_t) TC(page2pa(pgdir));
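	/* Sketch-only sanity check (not in the original; assumes the usual
	 * PADDR() kva-to-physaddr macro): env_cr3 names the same frame that
	 * env_pgdir points at through the kernel's direct mapping. */
	assert(PADDR(e->env_pgdir) == e->env_cr3);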

	/*
	 * Now start filling in the pgdir with mappings required by all newly
	 * created address spaces.
	 */

	// Map in the kernel to the top of every address space
	// should be able to do this so long as boot_pgdir never has
	// anything put below UTOP
	// TODO check on this!  had a nasty bug because of it
	// this is a bit wonky, since if it's not PGSIZE, lots of other things are
	// probably broken as well
	memcpy(e->env_pgdir, boot_pgdir, NPDENTRIES*sizeof(pde_t));

	// VPT and UVPT map the env's own page table, with
	// different permissions.
	e->env_pgdir[PDX(VPT)]  = PTE(PPN(e->env_cr3), PTE_P | PTE_KERN_RW);
	e->env_pgdir[PDX(UVPT)] = PTE(PPN(e->env_cr3), PTE_P | PTE_USER_RO);
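	/* Sketch-only illustration (not in the original; assumes the JOS-style
	 * PTE_ADDR() that strips the flag bits): both entries above point back at
	 * this very page directory, which is what makes the VPT/UVPT self-maps
	 * work. */
	assert(PTE_ADDR(e->env_pgdir[PDX(VPT)])  == e->env_cr3);
	assert(PTE_ADDR(e->env_pgdir[PDX(UVPT)]) == e->env_cr3);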

	/*
	 * Now allocate and insert all pages required for the shared
	 * procinfo structure into the page table.
	 */
	for(int i=0; i<PROCINFO_NUM_PAGES; i++) {
		if(page_alloc(&pginfo[i]) < 0)
			goto env_setup_vm_error;
		if(page_insert(e->env_pgdir, pginfo[i], (void*SNT)(UINFO + i*PGSIZE),
		               PTE_USER_RO) < 0)
			goto env_setup_vm_error;
	}

	/*
	 * Now allocate and insert all pages required for the shared
	 * procdata structure into the page table.
	 */
	for(int i=0; i<PROCDATA_NUM_PAGES; i++) {
		if(page_alloc(&pgdata[i]) < 0)
			goto env_setup_vm_error;
		if(page_insert(e->env_pgdir, pgdata[i], (void*SNT)(UDATA + i*PGSIZE),
		               PTE_USER_RW) < 0)
			goto env_setup_vm_error;
	}

	/*
	 * Now set e->env_procinfo and e->env_procdata to point to
	 * the proper pages just allocated, and clear them out.
	 */
	e->env_procinfo = (procinfo_t *SAFE) TC(page2kva(pginfo[0]));
	e->env_procdata = (procdata_t *SAFE) TC(page2kva(pgdata[0]));

	memset(e->env_procinfo, 0, sizeof(procinfo_t));
	memset(e->env_procdata, 0, sizeof(procdata_t));
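	/* Sketch-only checks (not in the original): the structures cleared above
	 * must fit inside the pages reserved for them, or the memsets would run
	 * past the frames just allocated. */
	assert(sizeof(procinfo_t) <= PROCINFO_NUM_PAGES * PGSIZE);
	assert(sizeof(procdata_t) <= PROCDATA_NUM_PAGES * PGSIZE);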

	/* Finally, set up the Global Shared Data page for all processes.
	 * Can't be trusted, but still very useful at this stage for us.
	 * Consider removing when we have real processes (TODO).
	 * Note the page is alloced only the first time through. */
	if(shared_page == NULL) {
		if(page_alloc(&shared_page) < 0)
			goto env_setup_vm_error;
		// Up it, so it never goes away.  One per user, plus one from page_alloc
		// This is necessary, since it's in the per-process range of memory that
		// gets freed during page_free.
		page_incref(shared_page);
	}

	// Inserted into every process's address space at UGDATA
	if(page_insert(e->env_pgdir, shared_page, (void*SNT)UGDATA, PTE_USER_RW) < 0)
		goto env_setup_vm_error;
	return 0;

env_setup_vm_error:
	page_free(shared_page);
	for(int i=0; i< PROCDATA_NUM_PAGES; i++) {
		page_free(pgdata[i]);
	}
	for(int i=0; i< PROCINFO_NUM_PAGES; i++) {
		page_free(pginfo[i]);
	}
	env_user_mem_free(e);
	return -ENOMEM;
}

//
// Allocates and initializes a new environment.
// On success, the new environment is stored in *newenv_store.
//
// Returns 0 on success, < 0 on failure.  Errors include:
//	-ENOFREEENV if all NENV environments are allocated
//	-ENOMEM on memory exhaustion
//
int
env_alloc(env_t **newenv_store, envid_t parent_id)
{
	env_t *e;
	int32_t generation;
	int r;

	spin_lock(&freelist_lock);
	e = TAILQ_FIRST(&proc_freelist);
	if (e) {
		TAILQ_REMOVE(&proc_freelist, e, proc_link);
		spin_unlock(&freelist_lock);
	} else {
		spin_unlock(&freelist_lock);
		return -ENOFREEENV;
	}

	// Allocate and set up the page directory for this environment.
	if ((r = env_setup_vm(e)) < 0) {
		spin_lock(&freelist_lock);
		TAILQ_INSERT_HEAD(&proc_freelist, e, proc_link);
		spin_unlock(&freelist_lock);
		return r;
	}

	// Generate an env_id for this environment.
	generation = (e->env_id + (1 << ENVGENSHIFT)) & ~(NENV - 1);
	if (generation <= 0)	// Don't create a negative env_id.
		generation = 1 << ENVGENSHIFT;
	e->env_id = generation | (e - envs);
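	/* Sketch-only check (not in the original): the generation was masked with
	 * ~(NENV - 1), so it occupies only the high bits and ENVX() still
	 * recovers this env's slot in the envs[] array. */
	assert(ENVX(e->env_id) == e - envs);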

	// Set the basic status variables.
	spinlock_init(&e->proc_lock);
	e->env_parent_id = parent_id;
	proc_set_state(e, PROC_CREATED);

	e->env_entry = 0; // cheating.  this really gets set in load_icode

	memset(&e->vcoremap, -1, sizeof(e->vcoremap));
	//for (int i = 0; i < MAX_NUM_CPUS; i++)
	//	e->vcoremap[i] = -1;
	memset(&e->resources, 0, sizeof(e->resources));

	memset(&e->env_ancillary_state, 0, sizeof(e->env_ancillary_state));
	memset(&e->env_tf, 0, sizeof(e->env_tf));
	proc_init_trapframe(&e->env_tf);

	/*
	 * Initialize the contents of the e->env_procinfo structure.
	 */
	e->env_procinfo->id = (e->env_id & 0x3FF);

	/*
	 * Initialize the contents of the e->env_procdata structure.
	 */
	// Initialize the generic syscall ring buffer
	SHARED_RING_INIT(&e->env_procdata->syscallring);
	// Initialize the backend of the syscall ring buffer
	BACK_RING_INIT(&e->syscallbackring,
	               &e->env_procdata->syscallring,
	               SYSCALLRINGSIZE);

	// Initialize the generic sysevent ring buffer
	SHARED_RING_INIT(&e->env_procdata->syseventring);
	// Initialize the frontend of the sysevent ring buffer
	FRONT_RING_INIT(&e->syseventfrontring,
	                &e->env_procdata->syseventring,
	                SYSEVENTRINGSIZE);
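	/* Sketch-only check (standard Xen-style ring macro assumed): freshly
	 * initialized rings start out with nothing pending on either side. */
	assert(!RING_HAS_UNCONSUMED_REQUESTS(&e->syscallbackring));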

	atomic_inc(&num_envs);

	printk("[%08x] new env %08x\n", current ? current->env_id : 0, e->env_id);

	*newenv_store = e;
	return 0;
}

//
// Allocate len bytes of physical memory for environment e,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(env_t *e, void *SNT va, size_t len)
{
	void *SNT start, *SNT end;
	int num_pages;
	int i, r;
	page_t *page;
	pte_t *pte;

	start = ROUNDDOWN(va, PGSIZE);
	end = ROUNDUP(va + len, PGSIZE);
	if (start > end)
		panic("Wrap-around in memory allocation addresses!");
	if ((uintptr_t)end > UTOP)
		panic("Attempting to map above UTOP!");
	// page_insert/pgdir_walk alloc a page and read/write to it via its address
	// starting from pgdir (e's), so we need to be using e's pgdir
	assert(e->env_cr3 == rcr3());
	num_pages = PPN(end - start);

	for (i = 0; i < num_pages; i++, start += PGSIZE) {
		// skip if a page is already mapped.  yes, page_insert will page_remove
		// whatever page was already there, but if we are seg allocing adjacent
		// regions, we don't want to destroy that old mapping/page
		// (though later on we are told we can ignore this...)
		pte = pgdir_walk(e->env_pgdir, start, 0);
		if (pte && *pte & PTE_P)
			continue;
		if ((r = page_alloc(&page)) < 0)
			panic("segment_alloc: %e", r);
		page_insert(e->env_pgdir, page, start, PTE_USER_RW);
	}
}

//
// Set up the initial program binary, stack, and processor flags
// for a user process.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// At the same time it clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file - i.e., the program's bss section.
//
// Finally, this function maps one page for the program's initial stack.
//
static void
load_icode(env_t *SAFE e, uint8_t *COUNT(size) binary, size_t size)
{
	// asw: copy the headers because they might not be aligned.
	elf_t elfhdr;
	proghdr_t phdr;
	int i;

	memcpy(&elfhdr, binary, sizeof(elfhdr));

	// make sure this is an ELF image
	assert(elfhdr.e_magic == ELF_MAGIC);
	// make sure we have proghdrs to load
	assert(elfhdr.e_phnum);

	// to actually access any pages alloc'd for this environment, we
	// need to have the hardware use this environment's page tables.
	uintreg_t old_cr3 = rcr3();

	/*
	 * Even though we'll decref later and no one should be killing us at this
	 * stage, we're still going to wrap the lcr3s with incref/decref.
	 *
	 * Note we never decref on the old_cr3, since we aren't willing to let it
	 * die.  It's also not clear who the previous process is - sometimes it
	 * isn't even a process (when the kernel loads on its own, and not in
	 * response to a syscall).  Probably need to think more about this (TODO).
	 *
	 * This can get a bit tricky if this code blocks (will need to think about
	 * a decref then), if we try to change states, etc.
	 */
	lcr3(e->env_cr3);

	// TODO: how do we do a runtime COUNT?
	{TRUSTEDBLOCK // zra: TRUSTEDBLOCK until validation is done.
	for (i = 0; i < elfhdr.e_phnum; i++) {
		memcpy(&phdr, binary + elfhdr.e_phoff + i*sizeof(phdr), sizeof(phdr));
		if (phdr.p_type != ELF_PROG_LOAD)
			continue;
		// TODO: validate elf header fields!
		// seg alloc creates PTE_U|PTE_W pages.  if you ever want to change
		// this, there will be issues with overlapping sections
		segment_alloc(e, (void*SNT)phdr.p_va, phdr.p_memsz);
		memcpy((void*)phdr.p_va, binary + phdr.p_offset, phdr.p_filesz);
		memset((void*)phdr.p_va + phdr.p_filesz, 0, phdr.p_memsz - phdr.p_filesz);
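		/* Sketch of the kind of validation the TODO above asks for (not in
		 * the original): a sane program header never stores more file bytes
		 * than the segment occupies in memory. */
		assert(phdr.p_filesz <= phdr.p_memsz);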
	}}

	proc_set_program_counter(&e->env_tf, elfhdr.e_entry);
	e->env_entry = elfhdr.e_entry;

	// Now map one page for the program's initial stack
	// at virtual address USTACKTOP - PGSIZE.
	segment_alloc(e, (void*SNT)(USTACKTOP - PGSIZE), PGSIZE);

	// reload the original address space
	lcr3(old_cr3);
}

//
// Allocates a new env and loads the named ELF binary into it.
//
env_t* env_create(uint8_t *binary, size_t size)
{
	env_t *e;
	envid_t curid;
	int r;

	curid = (current ? current->env_id : 0);
	if ((r = env_alloc(&e, curid)) < 0)
		panic("env_create: %e", r);
	load_icode(e, binary, size);
	return e;
}

//
// Frees env e and all memory it uses.
//
void
env_free(env_t *e)
{
	physaddr_t pa;

	// Note the environment's demise.
	printk("[%08x] free env %08x\n", current ? current->env_id : 0, e->env_id);
	// All parts of the kernel should have decref'd before env_free was called.
	assert(e->env_refcnt == 0);

	// Flush all mapped pages in the user portion of the address space
	env_user_mem_free(e);

	// free the page directory
	pa = e->env_cr3;
	e->env_pgdir = 0;
	e->env_cr3 = 0;
	page_decref(pa2page(pa));

	// return the environment to the free list
	e->state = ENV_FREE;
	spin_lock(&freelist_lock);
	TAILQ_INSERT_HEAD(&proc_freelist, e, proc_link);
	spin_unlock(&freelist_lock);
}

#define PER_CPU_THING(type,name)\
type SLOCKED(name##_lock) * RWPROTECT name;\
type SLOCKED(name##_lock) *\
(get_per_cpu_##name)()\
{ R_PERMITTED(global(name))\
	return &name[core_id()];\
}
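/* Sketch-only usage of the macro above (hypothetical name 'sketch_ticks', not
 * in the original): this would expand to a lock-annotated per-cpu pointer plus
 * an accessor, so get_per_cpu_sketch_ticks() returns the calling core's slot. */
PER_CPU_THING(uint32_t, sketch_ticks)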

/* This is the top-half of an interrupt handler, where the bottom half is
 * proc_run (which never returns).  Just add it to the delayed work queue,
 * which (incidentally) can only hold one item at this point.
 *
 * Note this is rather old, and meant to run a RUNNABLE_S on a worker core.
 */
#ifdef __IVY__
void run_env_handler(trapframe_t *tf, env_t * data)
#else
void run_env_handler(trapframe_t *tf, void * data)
#endif
{
	struct work TP(env_t *) job;
	struct workqueue TP(env_t *) *CT(1) workqueue =
	    TC(&per_cpu_info[core_id()].workqueue);

	// this doesn't work, and making it a TP(env_t) is wrong
	// zra: When you want to use other types, let me know, and I can help
	// make something that Ivy is happy with.
	job.func = (func_t)proc_run;

	if (enqueue_work(workqueue, &job))
		panic("Failed to enqueue work!");
}