Stops using boot_alloc during vm_init
author	Barret Rhoden <brho@cs.berkeley.edu>
Sun, 16 Jun 2013 05:56:24 +0000 (22:56 -0700)
committer	Barret Rhoden <brho@cs.berkeley.edu>
Sat, 22 Jun 2013 17:29:31 +0000 (10:29 -0700)
Now calls vm_init after fully setting up the free page list.  One
requirement of this is that we have all available physical memory mapped
into the boot page tables.  Our current page allocator hands out pages in
reverse order, so allocations start right below max_paddr.  We're good up
to 512GB on amd64.

The benefit of this is that we no longer have to call boot_alloc from our
vm_init code, which cleans things up a bit.
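
In rough terms, the new ordering and its constraint look like the sketch
below.  kpage_zalloc_addr(), page_alloc_init(), vm_init(), and max_paddr are
the real names from the diffs that follow; BOOT_MAP_LIMIT, the assert, and
pmem_init_sketch() are hypothetical, only there to spell out the 512GB
assumption.

	/* Sketch only: bring up the page allocator first, then let vm_init()
	 * pull its pages from the free list instead of from boot_alloc(). */
	#define BOOT_MAP_LIMIT	(512ULL << 30)	/* phys mem covered by boot page tables */

	void pmem_init_sketch(struct multiboot_info *mbi)
	{
		page_alloc_init(mbi);	/* free page list is usable from here on */
		/* Early allocations come from just below max_paddr, so that
		 * memory must already be mapped by the boot page tables. */
		assert(max_paddr <= BOOT_MAP_LIMIT);
		vm_init();		/* now allocates its pgdir via kpage_zalloc_addr() */
	}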

kern/arch/x86/pmap32.c
kern/arch/x86/pmap64.c
kern/include/page_alloc.h
kern/src/page_alloc.c
kern/src/pmap.c

diff --git a/kern/arch/x86/pmap32.c b/kern/arch/x86/pmap32.c
index 7493651..7d63535 100644
@@ -67,67 +67,10 @@ pseudodesc_t gdt_pd = {
 static void check_boot_pgdir(bool pse);
 
 //
-// Given pgdir, a pointer to a page directory,
-// walk the 2-level page table structure to find
-// the page table entry (PTE) for linear address la.
-// Return a pointer to this PTE.
-//
-// If the relevant page table doesn't exist in the page directory:
-//     - If create == 0, return 0.
-//     - Otherwise allocate a new page table, install it into pgdir,
-//       and return a pointer into it.
-//        (Questions: What data should the new page table contain?
-//       And what permissions should the new pgdir entry have?
-//       Note that we use the 486-only "WP" feature of %cr0, which
-//       affects the way supervisor-mode writes are checked.)
-//
-// This function abstracts away the 2-level nature of
-// the page directory by allocating new page tables
-// as needed.
-// 
-// boot_pgdir_walk may ONLY be used during initialization,
-// before the page_free_list has been set up.
-// It should panic on failure.  (Note that boot_alloc already panics
-// on failure.)
-//
-// Supports returning jumbo (4MB PSE) PTEs.  To create with a jumbo, pass in 2.
-// 
-// Maps non-PSE PDEs as U/W.  W so the kernel can, U so the user can read via
-// UVPT.  UVPT security comes from the UVPT mapping (U/R).  All other kernel pages
-// protected at the second layer
-static pte_t*
-boot_pgdir_walk(pde_t *COUNT(NPDENTRIES) pgdir, uintptr_t la, int create)
-{
-       pde_t* the_pde = &pgdir[PDX(la)];
-       void* new_table;
-
-       if (*the_pde & PTE_P) {
-               if (*the_pde & PTE_PS)
-                       return (pte_t*)the_pde;
-               return &((pde_t*COUNT(NPTENTRIES))KADDR(PTE_ADDR(*the_pde)))[PTX(la)];
-       }
-       if (!create)
-               return NULL;
-       if (create == 2) {
-               if (JPGOFF(la))
-                       panic("Attempting to find a Jumbo PTE at an unaligned VA!");
-               *the_pde = PTE_PS | PTE_P;
-               return (pte_t*)the_pde;
-       }
-       new_table = boot_alloc(PGSIZE, PGSIZE);
-       memset(new_table, 0, PGSIZE);
-       *the_pde = (pde_t)PADDR(new_table) | PTE_P | PTE_W | PTE_U | PTE_G;
-       return &((pde_t*COUNT(NPTENTRIES))KADDR(PTE_ADDR(*the_pde)))[PTX(la)];
-}
-
-//
 // Map [la, la+size) of linear address space to physical [pa, pa+size)
 // in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
 // Use permission bits perm|PTE_P for the entries.
 //
-// This function may ONLY be used during initialization,
-// before the page_free_list has been set up.
-//
 // To map with Jumbos, set PTE_PS in perm
 static void
 boot_map_segment(pde_t *COUNT(NPDENTRIES) pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm)
@@ -146,12 +89,14 @@ boot_map_segment(pde_t *COUNT(NPDENTRIES) pgdir, uintptr_t la, size_t size, phys
                        panic("Tried to map a Jumbo page at an unaligned address!");
                // need to index with i instead of la + size, in case of wrap-around
                for (i = 0; i < size; i += JPGSIZE, la += JPGSIZE, pa += JPGSIZE) {
-                       pte = boot_pgdir_walk(pgdir, la, 2);
+                       pte = pgdir_walk(pgdir, (void*)la, 2);
+                       assert(pte);
                        *pte = PTE_ADDR(pa) | PTE_P | perm;
                }
        } else {
                for (i = 0; i < size; i += PGSIZE, la += PGSIZE, pa += PGSIZE) {
-                       pte = boot_pgdir_walk(pgdir, la, 1);
+                       pte = pgdir_walk(pgdir, (void*)la, 1);
+                       assert(pte);
                        if (*pte & PTE_PS)
                                // if we start using the extra flag for PAT, which we aren't,
                                // this will warn, since PTE_PS and PTE_PAT are the same....
@@ -217,8 +162,8 @@ vm_init(void)
 
        //////////////////////////////////////////////////////////////////////
        // create initial page directory.
-       pgdir = boot_alloc(PGSIZE, PGSIZE);
-       memset(pgdir, 0, PGSIZE);
+       pgdir = kpage_zalloc_addr();
+       assert(pgdir);
        boot_pgdir = pgdir;
        boot_cr3 = PADDR(pgdir);
        // helpful if you want to manually walk with kvm / bochs
@@ -478,9 +423,6 @@ error_t     pagetable_remove(pde_t *pgdir, void *va)
 //     with page_alloc.  If this fails, pgdir_walk returns NULL.
 //    - Otherwise, pgdir_walk returns a pointer into the new page table.
 //
-// This is boot_pgdir_walk, but using page_alloc() instead of boot_alloc().
-// Unlike boot_pgdir_walk, pgdir_walk can fail.
-//
 // Hint: you can turn a Page * into the physical address of the
 // page it refers to with page2pa() from kern/pmap.h.
 //
@@ -745,75 +687,3 @@ void env_pagetable_free(env_t* e)
        page_decref(pa2page(pa));
        tlbflush();
 }
-
-/* 
-
-    // testing code for boot_pgdir_walk 
-       pte_t* temp;
-       temp = boot_pgdir_walk(pgdir, VPT + (VPT >> 10), 1);
-       cprintf("pgdir = %p\n", pgdir);
-       cprintf("test recursive walking pte_t* = %p\n", temp);
-       cprintf("test recursive walking entry = %p\n", PTE_ADDR(temp));
-       temp = boot_pgdir_walk(pgdir, 0xc0400000, 1);
-       cprintf("LA = 0xc0400000 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0400070, 1);
-       cprintf("LA = 0xc0400070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0800000, 0);
-       cprintf("LA = 0xc0800000, no create = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0600070, 1);
-       cprintf("LA = 0xc0600070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0600090, 0);
-       cprintf("LA = 0xc0600090, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0608070, 0);
-       cprintf("LA = 0xc0608070, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0800070, 1);
-       cprintf("LA = 0xc0800070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0b00070, 0);
-       cprintf("LA = 0xc0b00070, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0c00000, 0);
-       cprintf("LA = 0xc0c00000, nc = %p\n", temp);
-
-       // testing for boot_map_seg
-       cprintf("\n");
-       cprintf("before mapping 1 page to 0x00350000\n");
-       cprintf("0xc4000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc4000000, 1));
-       cprintf("0xc4000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc4000000, 1)));
-       boot_map_segment(pgdir, 0xc4000000, 4096, 0x00350000, PTE_W);
-       cprintf("after mapping\n");
-       cprintf("0xc4000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc4000000, 1));
-       cprintf("0xc4000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc4000000, 1)));
-
-       cprintf("\n");
-       cprintf("before mapping 3 pages to 0x00700000\n");
-       cprintf("0xd0000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0000000, 1));
-       cprintf("0xd0000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0000000, 1)));
-       cprintf("0xd0001000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0001000, 1));
-       cprintf("0xd0001000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0001000, 1)));
-       cprintf("0xd0002000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0002000, 1));
-       cprintf("0xd0002000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0002000, 1)));
-       boot_map_segment(pgdir, 0xd0000000, 4096*3, 0x00700000, 0);
-       cprintf("after mapping\n");
-       cprintf("0xd0000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0000000, 1));
-       cprintf("0xd0000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0000000, 1)));
-       cprintf("0xd0001000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0001000, 1));
-       cprintf("0xd0001000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0001000, 1)));
-       cprintf("0xd0002000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0002000, 1));
-       cprintf("0xd0002000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0002000, 1)));
-
-       cprintf("\n");
-       cprintf("before mapping 1 unaligned to 0x00500010\n");
-       cprintf("0xc8000010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8000010, 1));
-       cprintf("0xc8000010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8000010, 1)));
-       cprintf("0xc8001010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8001010, 1));
-       cprintf("0xc8001010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8001010, 1)));
-       boot_map_segment(pgdir, 0xc8000010, 4096, 0x00500010, PTE_W);
-       cprintf("after mapping\n");
-       cprintf("0xc8000010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8000010, 1));
-       cprintf("0xc8000010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8000010, 1)));
-       cprintf("0xc8001010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8001010, 1));
-       cprintf("0xc8001010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8001010, 1)));
-
-       cprintf("\n");
-       boot_map_segment(pgdir, 0xe0000000, 4096, 0x10000000, PTE_W);
-
-*/
diff --git a/kern/arch/x86/pmap64.c b/kern/arch/x86/pmap64.c
index 1c529b8..c62fe72 100644
@@ -69,60 +69,6 @@ pseudodesc_t gdt_pd = {
 static void check_boot_pgdir(bool pse);
 
 //
-// Given pgdir, a pointer to a page directory,
-// walk the 2-level page table structure to find
-// the page table entry (PTE) for linear address la.
-// Return a pointer to this PTE.
-//
-// If the relevant page table doesn't exist in the page directory:
-//     - If create == 0, return 0.
-//     - Otherwise allocate a new page table, install it into pgdir,
-//       and return a pointer into it.
-//        (Questions: What data should the new page table contain?
-//       And what permissions should the new pgdir entry have?
-//       Note that we use the 486-only "WP" feature of %cr0, which
-//       affects the way supervisor-mode writes are checked.)
-//
-// This function abstracts away the 2-level nature of
-// the page directory by allocating new page tables
-// as needed.
-// 
-// boot_pgdir_walk may ONLY be used during initialization,
-// before the page_free_list has been set up.
-// It should panic on failure.  (Note that boot_alloc already panics
-// on failure.)
-//
-// Supports returning jumbo (4MB PSE) PTEs.  To create with a jumbo, pass in 2.
-// 
-// Maps non-PSE PDEs as U/W.  W so the kernel can, U so the user can read via
-// UVPT.  UVPT security comes from the UVPT mapping (U/R).  All other kernel pages
-// protected at the second layer
-static pte_t*
-boot_pgdir_walk(pde_t *COUNT(NPDENTRIES) pgdir, uintptr_t la, int create)
-{
-       pde_t* the_pde = &pgdir[PDX(la)];
-       void* new_table;
-
-       if (*the_pde & PTE_P) {
-               if (*the_pde & PTE_PS)
-                       return (pte_t*)the_pde;
-               return &((pde_t*COUNT(NPTENTRIES))KADDR(PTE_ADDR(*the_pde)))[PTX(la)];
-       }
-       if (!create)
-               return NULL;
-       if (create == 2) {
-               if (JPGOFF(la))
-                       panic("Attempting to find a Jumbo PTE at an unaligned VA!");
-               *the_pde = PTE_PS | PTE_P;
-               return (pte_t*)the_pde;
-       }
-       new_table = boot_alloc(PGSIZE, PGSIZE);
-       memset(new_table, 0, PGSIZE);
-       *the_pde = (pde_t)PADDR(new_table) | PTE_P | PTE_W | PTE_U | PTE_G;
-       return &((pde_t*COUNT(NPTENTRIES))KADDR(PTE_ADDR(*the_pde)))[PTX(la)];
-}
-
-//
 // Map [la, la+size) of linear address space to physical [pa, pa+size)
 // in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
 // Use permission bits perm|PTE_P for the entries.
@@ -148,12 +94,14 @@ boot_map_segment(pde_t *COUNT(NPDENTRIES) pgdir, uintptr_t la, size_t size, phys
                        panic("Tried to map a Jumbo page at an unaligned address!");
                // need to index with i instead of la + size, in case of wrap-around
                for (i = 0; i < size; i += JPGSIZE, la += JPGSIZE, pa += JPGSIZE) {
-                       pte = boot_pgdir_walk(pgdir, la, 2);
+                       pte = pgdir_walk(pgdir, (void*)la, 2);
+                       assert(pte);
                        *pte = PTE_ADDR(pa) | PTE_P | perm;
                }
        } else {
                for (i = 0; i < size; i += PGSIZE, la += PGSIZE, pa += PGSIZE) {
-                       pte = boot_pgdir_walk(pgdir, la, 1);
+                       pte = pgdir_walk(pgdir, (void*)la, 1);
+                       assert(pte);
                        if (*pte & PTE_PS)
                                // if we start using the extra flag for PAT, which we aren't,
                                // this will warn, since PTE_PS and PTE_PAT are the same....
@@ -223,8 +171,8 @@ vm_init(void)
 
        //////////////////////////////////////////////////////////////////////
        // create initial page directory.
-       pgdir = boot_alloc(PGSIZE, PGSIZE);
-       memset(pgdir, 0, PGSIZE);
+       pgdir = kpage_zalloc_addr();
+       assert(pgdir);
        boot_pgdir = pgdir;
        boot_cr3 = PADDR(pgdir);
        // helpful if you want to manually walk with kvm / bochs
@@ -487,9 +435,6 @@ error_t     pagetable_remove(pde_t *pgdir, void *va)
 //     with page_alloc.  If this fails, pgdir_walk returns NULL.
 //    - Otherwise, pgdir_walk returns a pointer into the new page table.
 //
-// This is boot_pgdir_walk, but using page_alloc() instead of boot_alloc().
-// Unlike boot_pgdir_walk, pgdir_walk can fail.
-//
 // Hint: you can turn a Page * into the physical address of the
 // page it refers to with page2pa() from kern/pmap.h.
 //
@@ -754,75 +699,3 @@ void env_pagetable_free(env_t* e)
        page_decref(pa2page(pa));
        tlbflush();
 }
-
-/* 
-
-    // testing code for boot_pgdir_walk 
-       pte_t* temp;
-       temp = boot_pgdir_walk(pgdir, VPT + (VPT >> 10), 1);
-       cprintf("pgdir = %p\n", pgdir);
-       cprintf("test recursive walking pte_t* = %p\n", temp);
-       cprintf("test recursive walking entry = %p\n", PTE_ADDR(temp));
-       temp = boot_pgdir_walk(pgdir, 0xc0400000, 1);
-       cprintf("LA = 0xc0400000 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0400070, 1);
-       cprintf("LA = 0xc0400070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0800000, 0);
-       cprintf("LA = 0xc0800000, no create = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0600070, 1);
-       cprintf("LA = 0xc0600070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0600090, 0);
-       cprintf("LA = 0xc0600090, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0608070, 0);
-       cprintf("LA = 0xc0608070, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0800070, 1);
-       cprintf("LA = 0xc0800070 = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0b00070, 0);
-       cprintf("LA = 0xc0b00070, nc = %p\n", temp);
-       temp = boot_pgdir_walk(pgdir, 0xc0c00000, 0);
-       cprintf("LA = 0xc0c00000, nc = %p\n", temp);
-
-       // testing for boot_map_seg
-       cprintf("\n");
-       cprintf("before mapping 1 page to 0x00350000\n");
-       cprintf("0xc4000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc4000000, 1));
-       cprintf("0xc4000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc4000000, 1)));
-       boot_map_segment(pgdir, 0xc4000000, 4096, 0x00350000, PTE_W);
-       cprintf("after mapping\n");
-       cprintf("0xc4000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc4000000, 1));
-       cprintf("0xc4000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc4000000, 1)));
-
-       cprintf("\n");
-       cprintf("before mapping 3 pages to 0x00700000\n");
-       cprintf("0xd0000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0000000, 1));
-       cprintf("0xd0000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0000000, 1)));
-       cprintf("0xd0001000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0001000, 1));
-       cprintf("0xd0001000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0001000, 1)));
-       cprintf("0xd0002000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0002000, 1));
-       cprintf("0xd0002000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0002000, 1)));
-       boot_map_segment(pgdir, 0xd0000000, 4096*3, 0x00700000, 0);
-       cprintf("after mapping\n");
-       cprintf("0xd0000000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0000000, 1));
-       cprintf("0xd0000000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0000000, 1)));
-       cprintf("0xd0001000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0001000, 1));
-       cprintf("0xd0001000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0001000, 1)));
-       cprintf("0xd0002000's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xd0002000, 1));
-       cprintf("0xd0002000's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xd0002000, 1)));
-
-       cprintf("\n");
-       cprintf("before mapping 1 unaligned to 0x00500010\n");
-       cprintf("0xc8000010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8000010, 1));
-       cprintf("0xc8000010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8000010, 1)));
-       cprintf("0xc8001010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8001010, 1));
-       cprintf("0xc8001010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8001010, 1)));
-       boot_map_segment(pgdir, 0xc8000010, 4096, 0x00500010, PTE_W);
-       cprintf("after mapping\n");
-       cprintf("0xc8000010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8000010, 1));
-       cprintf("0xc8000010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8000010, 1)));
-       cprintf("0xc8001010's &pte: %08x\n",boot_pgdir_walk(pgdir, 0xc8001010, 1));
-       cprintf("0xc8001010's pte: %08x\n",*(boot_pgdir_walk(pgdir, 0xc8001010, 1)));
-
-       cprintf("\n");
-       boot_map_segment(pgdir, 0xe0000000, 4096, 0x10000000, PTE_W);
-
-*/
diff --git a/kern/include/page_alloc.h b/kern/include/page_alloc.h
index 5679d58..5a98312 100644
@@ -60,6 +60,8 @@ void colored_page_alloc_init(void);
 
 error_t upage_alloc(struct proc* p, page_t *SAFE *page, int zero);
 error_t kpage_alloc(page_t *SAFE *page);
+void *kpage_alloc_addr(void);
+void *kpage_zalloc_addr(void);
 error_t upage_alloc_specific(struct proc* p, page_t *SAFE *page, size_t ppn);
 error_t kpage_alloc_specific(page_t *SAFE *page, size_t ppn);
 
diff --git a/kern/src/page_alloc.c b/kern/src/page_alloc.c
index bf4b082..f2f6571 100644
@@ -153,6 +153,24 @@ error_t kpage_alloc(page_t** page)
        return ret;
 }
 
+/* Helper: allocates a refcounted page of memory for the kernel's use and
+ * returns the kernel address (kernbase), or 0 on error. */
+void *kpage_alloc_addr(void)
+{
+       struct page *a_page;
+       if (kpage_alloc(&a_page))
+               return 0;
+       return page2kva(a_page);
+}
+
+void *kpage_zalloc_addr(void)
+{
+       void *retval = kpage_alloc_addr();
+       if (retval)
+               memset(retval, 0, PGSIZE);
+       return retval;
+}
+
 /**
  * @brief Allocated 2^order contiguous physical pages.  Will increment the
  * reference count for the pages.
diff --git a/kern/src/pmap.c b/kern/src/pmap.c
index a3adbad..d4ba124 100644
@@ -73,10 +73,8 @@ void pmem_init(struct multiboot_info *mbi)
        printk("Highest page number (including reserved): %lu\n", max_nr_pages);
        pages = (struct page*)boot_zalloc(max_nr_pages * sizeof(struct page),
                                          PGSIZE);
-       /* Turn on paging before turning on the page allocator, we still use
-        * boot_alloc in vm_init.  Doesn't really matter much either way. */
-       vm_init();
        page_alloc_init(mbi);
+       vm_init();
 
        static_assert(PROCINFO_NUM_PAGES*PGSIZE <= PTSIZE);
        static_assert(PROCDATA_NUM_PAGES*PGSIZE <= PTSIZE);