/* See COPYRIGHT for copyright information. */
-#ifdef __SHARC__
-#pragma nosharc
-#endif
-
#include <arch/arch.h>
#include <arch/mmu.h>
-#include <arch/bitmask.h>
+#include <bitmask.h>
#include <elf.h>
#include <smp.h>
-
#include <atomic.h>
#include <string.h>
#include <assert.h>
// -ENOMEM if page directory or table could not be allocated.
//
int env_setup_vm(env_t *e)
-WRITES(e->env_pgdir, e->env_cr3, e->procinfo, e->procdata)
{
- int i, r;
- page_t *pgdir = NULL;
- static page_t * RO shared_page = 0;
-
- /*
- * First, allocate a page for the pgdir of this process and up
- * its reference count since this will never be done elsewhere
- */
- r = kpage_alloc(&pgdir);
- if(r < 0) return r;
-
- /*
- * Next, set up the e->env_pgdir and e->env_cr3 pointers to point
- * to this newly allocated page and clear its contents
- */
- memset(page2kva(pgdir), 0, PGSIZE);
- e->env_pgdir = (pde_t *COUNT(NPDENTRIES)) TC(page2kva(pgdir));
- e->env_cr3 = (physaddr_t) TC(page2pa(pgdir));
-
- /*
- * Now start filling in the pgdir with mappings required by all newly
- * created address spaces
- */
-
- // Map in the kernel to the top of every address space
- // should be able to do this so long as boot_pgdir never has
- // anything put below UTOP
- // TODO check on this! had a nasty bug because of it
- // this is a bit wonky, since if it's not PGSIZE, lots of other things are
- // screwed up...
- memcpy(e->env_pgdir, boot_pgdir, NPDENTRIES*sizeof(pde_t));
-
- // VPT and UVPT map the env's own page table, with
- // different permissions.
- e->env_pgdir[PDX(VPT)] = PTE(LA2PPN(e->env_cr3), PTE_P | PTE_KERN_RW);
- e->env_pgdir[PDX(UVPT)] = PTE(LA2PPN(e->env_cr3), PTE_P | PTE_USER_RO);
+	int ret;
+ static page_t *shared_page = 0;
+
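+	/* The arch code builds this pgdir from boot_pgdir, replacing the old
+	 * memcpy of the kernel mappings and the VPT/UVPT self-maps. */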
+ if ((ret = arch_pgdir_setup(boot_pgdir, &e->env_pgdir)))
+ return ret;
+ e->env_cr3 = arch_pgdir_get_cr3(e->env_pgdir);
/* These need to be contiguous, so the kernel can alias them. Note the
* pages return with a refcnt, but it's okay to insert them since we free
* them manually when the process is cleaned up. */
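+	/* MEM_WAIT: these allocations may block until memory is available. */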
- if (!(e->procinfo = get_cont_pages(LOG2_UP(PROCINFO_NUM_PAGES), 0)))
+ if (!(e->procinfo = kpages_alloc(PROCINFO_NUM_PAGES * PGSIZE, MEM_WAIT)))
goto env_setup_vm_error_i;
- if (!(e->procdata = get_cont_pages(LOG2_UP(PROCDATA_NUM_PAGES), 0)))
+ if (!(e->procdata = kpages_alloc(PROCDATA_NUM_PAGES * PGSIZE, MEM_WAIT)))
goto env_setup_vm_error_d;
+ /* Normally we would 0 the pages here. We handle it in proc_init_proc*.
+ * Do not start the process without calling those. */
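+	/* procinfo is user-visible read-only at UINFO; procdata is read-write
+	 * at UDATA. */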
for (int i = 0; i < PROCINFO_NUM_PAGES; i++) {
if (page_insert(e->env_pgdir, kva2page((void*)e->procinfo + i *
- PGSIZE), (void*SNT)(UINFO + i*PGSIZE), PTE_USER_RO) < 0)
+ PGSIZE), (void*)(UINFO + i*PGSIZE), PTE_USER_RO) < 0)
goto env_setup_vm_error;
}
for (int i = 0; i < PROCDATA_NUM_PAGES; i++) {
if (page_insert(e->env_pgdir, kva2page((void*)e->procdata + i *
- PGSIZE), (void*SNT)(UDATA + i*PGSIZE), PTE_USER_RW) < 0)
+ PGSIZE), (void*)(UDATA + i*PGSIZE), PTE_USER_RW) < 0)
+ goto env_setup_vm_error;
+ }
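+	/* All processes also get a read-only view of the kernel's global info. */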
+ for (int i = 0; i < PROCGINFO_NUM_PAGES; i++) {
+ if (page_insert(e->env_pgdir,
+ kva2page((void*)&__proc_global_info + i * PGSIZE),
+ (void*)(UGINFO + i * PGSIZE), PTE_USER_RO) < 0)
goto env_setup_vm_error;
}
- memset(e->procinfo, 0, sizeof(struct procinfo));
- memset(e->procdata, 0, sizeof(struct procdata));
-
- /* Finally, set up the Global Shared Data page for all processes.
- * Can't be trusted, but still very useful at this stage for us.
- * Consider removing when we have real processes.
- * (TODO). Note the page is alloced only the first time through
- */
+ /* Finally, set up the Global Shared Data page for all processes. Can't be
+ * trusted, but still very useful at this stage for us. Consider removing
+ * when we have real processes (TODO).
+ *
+ * Note the page is alloced only the first time through, and its ref is
+ * stored in shared_page. */
if (!shared_page) {
- if(upage_alloc(e, &shared_page,1) < 0)
+ if (upage_alloc(e, &shared_page, 1) < 0)
goto env_setup_vm_error;
- // Up it, so it never goes away. One per user, plus one from page_alloc
- // This is necessary, since it's in the per-process range of memory that
- // gets freed during page_free.
- page_incref(shared_page);
}
-
- // Inserted into every process's address space at UGDATA
- if(page_insert(e->env_pgdir, shared_page, (void*SNT)UGDATA, PTE_USER_RW) < 0)
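+	/* Inserted into every process's address space at UGDATA. */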
+ if (page_insert(e->env_pgdir, shared_page, (void*)UGDATA, PTE_USER_RW) < 0)
goto env_setup_vm_error;
return 0;
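+	/* Error labels unwind the allocations above in reverse order. */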
env_setup_vm_error:
- free_cont_pages(e->procdata, LOG2_UP(PROCDATA_NUM_PAGES));
+ kpages_free(e->procdata, PROCDATA_NUM_PAGES * PGSIZE);
env_setup_vm_error_d:
- free_cont_pages(e->procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
+ kpages_free(e->procinfo, PROCINFO_NUM_PAGES * PGSIZE);
env_setup_vm_error_i:
- page_decref(shared_page);
env_user_mem_free(e, 0, UVPT);
env_pagetable_free(e);
return -ENOMEM;
}
-#define PER_CPU_THING(type,name)\
-type SLOCKED(name##_lock) * RWPROTECT name;\
-type SLOCKED(name##_lock) *\
-(get_per_cpu_##name)()\
-{\
- { R_PERMITTED(global(name))\
- return &name[core_id()];\
- }\
-}
-
/* Frees (decrefs) all memory mapped in the given range */
void env_user_mem_free(env_t* e, void* start, size_t len)
{
assert((uintptr_t)start + len <= UVPT); //since this keeps fucking happening
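+	/* Per-PTE callback for env_user_mem_walk() (a GCC nested function). */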
- int user_page_free(env_t* e, pte_t* pte, void* va, void* arg)
+ int user_page_free(env_t* e, pte_t pte, void* va, void* arg)
{
- if(PAGE_PRESENT(*pte))
- {
- page_t* page = ppn2page(PTE2PPN(*pte));
- *pte = 0;
- page_decref(page);
- } else {
- assert(PAGE_PAGED_OUT(*pte));
- /* TODO: (SWAP) deal with this */
- panic("Swapping not supported!");
- *pte = 0;
- }
+ if (!pte_is_mapped(pte))
+ return 0;
+ page_t *page = pa2page(pte_get_paddr(pte));
+ pte_clear(pte);
+ page_decref(page);
+	/* TODO: consider other states here (like !P, yet still tracking a page
+	 * for VM tricks, page map stuff, etc.). Should be okay: once we're
+	 * freeing, everything else about this proc is dead. */
return 0;
}

	env_user_mem_walk(e, start, len, &user_page_free, NULL);
	tlbflush();
}
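+
+/* Sets the username under u->name_lock. Throws via error() on bad input, so
+ * callers need a waserror() frame (see the usage sketch below). */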
+void set_username(struct username *u, char *name)
+{
+ ERRSTACK(1);
+
+ spin_lock(&u->name_lock);
+
+ if (waserror()) {
+ spin_unlock(&u->name_lock);
+ nexterror();
+ }
+
+ __set_username(u, name);
+
+ poperror();
+ spin_unlock(&u->name_lock);
+}
+
+/*
+ * This function exists so that callers can do their own locking - do not
+ * call it without holding the username's name_lock yourself.
+ */
+void __set_username(struct username *u, char *name)
+{
+ if (!name)
+ error(EINVAL, "New username is NULL");
+
+	if (strlen(name) > sizeof(u->name) - 1)
+		error(EINVAL, "New username for process more than %d chars long",
+		      (int)(sizeof(u->name) - 1));
+
+	/* 'Backward' copy, since reads aren't protected by the lock: blank the
+	 * first byte, write the tail, then write the first byte last. */
+	u->name[0] = 0;
+	wmb(); // ensure u->name = "" is seen before we write the rest
+	strlcpy(&u->name[1], &name[1], sizeof(u->name) - 1);
+	wmb(); // ensure the tail is written before the first byte
+	u->name[0] = name[0];
+}
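+
+/* A hypothetical caller sketch, not part of this patch: 'p->user' stands in
+ * for wherever the struct username lives. Since set_username() throws,
+ * callers catch with waserror()/poperror():
+ *
+ *	if (waserror()) {
+ *		printk("set_username failed: %s\n", current_errstr());
+ *		poperror();
+ *		return;
+ *	}
+ *	set_username(&p->user, name);
+ *	poperror();
+ */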