Added page-coloring support to SPARC port
author     Andrew Waterman <waterman@r53.millennium.berkeley.edu>
Fri, 21 Aug 2009 22:12:11 +0000 (15:12 -0700)
committer  Kevin Klues <klueska@eecs.berkeley.edu>
Fri, 21 Aug 2009 23:40:51 +0000 (16:40 -0700)
15 files changed:
kern/arch/i386/pmap.c
kern/arch/sparc/Makefrag
kern/arch/sparc/colored_caches.c [new file with mode: 0644]
kern/arch/sparc/colored_page_alloc.h [new file with mode: 0644]
kern/arch/sparc/frontend.c
kern/arch/sparc/page_alloc.c [new file with mode: 0644]
kern/arch/sparc/pmap.c
kern/arch/sparc/types.h
kern/include/colored_page_alloc.h
kern/include/page_alloc.h
kern/src/manager.c
kern/src/page_alloc.c
kern/src/pmap.c
kern/src/testing.c
scripts/mergedep.pl

diff --git a/kern/arch/i386/pmap.c b/kern/arch/i386/pmap.c
index 4a91782..d2db7f8 100644 (file)
@@ -426,29 +426,6 @@ vm_init(void)
        tlb_flush_global();
 }
 
-// --------------------------------------------------------------
-// Tracking of physical pages.
-// The 'pages' array has one 'page_t' entry per physical page.
-// Pages are reference counted, and free pages are kept on a linked list.
-// --------------------------------------------------------------
-
-// Initialize page structure and memory free list.
-void page_init(void)
-{
-       // First, make 'pages' point to an array of size 'npages' of 
-       // type 'page_t'.
-       // The kernel uses this structure to keep track of physical pages;
-       // 'npages' equals the number of physical pages in memory.  
-       // round up to the nearest page
-       size_t page_array_size = ROUNDUP(npages*sizeof(page_t), PGSIZE);
-       pages = (page_t*)boot_alloc(page_array_size, PGSIZE);
-       memset(pages, 0, page_array_size);
-       
-       // Now initilaize everything so pages can start to be alloced and freed
-       // from the memory free list
-       page_alloc_init();
-}
-
 //
 // Checks that the kernel part of virtual address space
 // has been setup roughly correctly(by i386_vm_init()).
@@ -640,7 +617,7 @@ pgdir_walk(pde_t *pgdir, const void *SNT va, int create)
        }
        if (page_alloc(&new_table))
                return NULL;
-       new_table->page_ref = 1;
+       page_setref(new_table,1);
        memset(page2kva(new_table), 0, PGSIZE);
        *the_pde = (pde_t)page2pa(new_table) | PTE_P | PTE_W | PTE_U;
        return &((pde_t*COUNT(NPTENTRIES))KADDR(PTE_ADDR(*the_pde)))[PTX(va)];
diff --git a/kern/arch/sparc/Makefrag b/kern/arch/sparc/Makefrag
index a923cd0..877f7ed 100644 (file)
@@ -21,6 +21,8 @@ KERN_ARCH_SRCFILES := $(KERN_ARCH_SRC_DIR)/entry.S \
                       $(KERN_ARCH_SRC_DIR)/console.c \
                       $(KERN_ARCH_SRC_DIR)/frontend.c \
                       $(KERN_ARCH_SRC_DIR)/pmap.c \
+                      $(KERN_ARCH_SRC_DIR)/page_alloc.c \
+                      $(KERN_ARCH_SRC_DIR)/colored_caches.c \
                       $(KERN_ARCH_SRC_DIR)/timer.c \
                       $(KERN_ARCH_SRC_DIR)/env.c \
                       $(KERN_ARCH_SRC_DIR)/smp.c
diff --git a/kern/arch/sparc/colored_caches.c b/kern/arch/sparc/colored_caches.c
new file mode 100644 (file)
index 0000000..ba7a01d
--- /dev/null
@@ -0,0 +1,26 @@
+/* Copyright (c) 2009 The Regents of the University  of California. 
+ * See the COPYRIGHT files at the top of this source tree for full 
+ * license information.
+ * 
+ * Kevin Klues <klueska@cs.berkeley.edu>    
+ */
+
+#include <colored_caches.h>
+
+// Global variables
+cache_t l1,l2,l3;
+available_caches_t available_caches;
+
+/************** Cache Related Functions  *****************/
+void cache_init() 
+{
+       // Initialize the caches available on this system.
+       // TODO: Should call out to something reading the acpi tables from 
+       // memory, or something similar.  For now, just initialize them inline
+       init_cache_properties(&l1,   32,  8, 64);
+       init_cache_properties(&l2,  256,  8, 64);
+       init_cache_properties(&l3, 8192, 16, 64);
+       available_caches.l1 = TRUE;
+       available_caches.l2 = FALSE;
+       available_caches.l3 = FALSE;
+}
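
If the arguments to init_cache_properties() above are the cache size in kilobytes, the associativity, and the line size in bytes (an assumption -- its prototype lives in colored_caches.h and is not shown in this diff), then the number of page colors each level contributes is size / (ways x page size). A standalone sketch, assuming 4KB pages:

    #include <stdio.h>

    #define PGSIZE 4096

    /* Pages that index the same cache sets share a color; one way's worth of
     * cache, divided into pages, gives the number of distinct colors. */
    static unsigned num_colors(unsigned sz_kb, unsigned ways)
    {
            return (sz_kb * 1024 / ways) / PGSIZE;
    }

    int main(void)
    {
            printf("l1: %u colors\n", num_colors(32, 8));    /* 1   */
            printf("l2: %u colors\n", num_colors(256, 8));   /* 8   */
            printf("l3: %u colors\n", num_colors(8192, 16)); /* 128 */
            return 0;
    }

Note that cache_init() marks only l1 as available, so only the l1 free lists are consulted by the guarded allocator in kern/include/colored_page_alloc.h further below.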
diff --git a/kern/arch/sparc/colored_page_alloc.h b/kern/arch/sparc/colored_page_alloc.h
new file mode 100644 (file)
index 0000000..4aa5813
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2009 The Regents of the University  of California.  
+ * See the COPYRIGHT files at the top of this source tree for full 
+ * license information.
+ */
+/**
+ * @author Kevin Klues <klueska@cs.berkeley.edu>
+ */
+#ifndef ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
+#define ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
+
+/********** Page Coloring Related Macros ************/
+// Define these to make sure that each level of the cache
+// is initialized and managed properly
+#define DECLARE_CACHE_COLORED_PAGE_LINKS()                    \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l1)                       \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l2)                       \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l3)
+
+#define DECLARE_CACHE_COLORED_PAGE_FREE_LISTS()               \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l1)                  \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l2)                  \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l3)
+       
+#define DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LISTS()        \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l1)           \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l2)           \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l3)
+       
+#define DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTIONS()          \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l1)             \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l2)             \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l3)
+
+#define INIT_CACHE_COLORED_PAGE_FREE_LISTS()                  \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l1)                     \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l2)                     \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l3)
+
+#define REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page)      \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l1)       \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l2)       \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l3)
+       
+#define INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS(page)      \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l1)       \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l2)       \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l3)
+
+#endif // ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
diff --git a/kern/arch/sparc/frontend.c b/kern/arch/sparc/frontend.c
index eb9267a..9c76ba6 100644 (file)
@@ -4,7 +4,7 @@
 #pragma nodeputy
 #endif
 
-volatile int magic_mem[8] __attribute__((align(32)));
+volatile int magic_mem[8] __attribute__((aligned(32)));
 
 int32_t frontend_syscall(int32_t syscall_num, uint32_t arg0, uint32_t arg1, uint32_t arg2)
 {
@@ -28,12 +28,6 @@ int32_t frontend_syscall(int32_t syscall_num, uint32_t arg0, uint32_t arg1, uint
        while(magic_mem[7] == 0)
                ;
 
-       magic_mem[0] = 0;
-
-       // wait for front-end ack
-       while(magic_mem[7] == 1)
-               ;
-
        ret = magic_mem[1];
 
        spin_unlock_irqsave(&lock);
diff --git a/kern/arch/sparc/page_alloc.c b/kern/arch/sparc/page_alloc.c
new file mode 100644 (file)
index 0000000..a5f9aca
--- /dev/null
@@ -0,0 +1,60 @@
+/* Copyright (c) 2009 The Regents of the University  of California. 
+ * See the COPYRIGHT files at the top of this source tree for full 
+ * license information.
+ * 
+ * Kevin Klues <klueska@cs.berkeley.edu>    
+ */
+#ifdef __DEPUTY__
+#pragma nodeputy
+#endif
+
+#include <sys/queue.h>
+#include <page_alloc.h>
+#include <pmap.h>
+#include <kmalloc.h>
+#include <multiboot.h>
+
+page_list_t page_free_list;    // Free list of physical pages
+DECLARE_CACHE_COLORED_PAGE_FREE_LISTS(); // Free list of pages filed by color
+
+/*
+ * Initialize the memory free lists.
+ * After this point, ONLY use the functions below
+ * to allocate and deallocate physical memory via the 
+ * page_free_lists. 
+ */
+void page_alloc_init() 
+{
+       // Now, initialize the lists required to manage the page free lists
+       LIST_INIT(&page_free_list);
+       INIT_CACHE_COLORED_PAGE_FREE_LISTS();
+       
+       //  Finally, mark the pages already in use by the kernel.
+       //  1) [0, physaddr_after_kernel):
+       //     The kernel image plus everything handed out by boot_alloc()
+       //     so far.  Mark these pages as in use.
+       //  2) [physaddr_after_kernel, maxaddrpa):
+       //     Free memory.  Put it on the global free list and on the
+       //     per-color free lists so it can be allocated.
+       //  3) [maxaddrpa, ...):
+       //     Beyond the end of physical memory.  Mark as in use so it is never allocated.
+       int i;
+       physaddr_t physaddr_after_kernel = PADDR(ROUNDUP(boot_freemem, PGSIZE));
+
+       // mark [0, physaddr_after_kernel) as in-use
+       for(i = 0; i < PPN(physaddr_after_kernel); i++)
+               pages[i].page_ref = 1;
+
+       // mark [physaddr_after_kernel, maxaddrpa) as free
+       for(i = PPN(physaddr_after_kernel); i < PPN(maxaddrpa); i++)
+       {
+               pages[i].page_ref = 0;
+               LIST_INSERT_HEAD(&page_free_list,&pages[i],global_link);
+               INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS(&pages[i]);
+       }
+
+       // mark [maxaddrpa, ...) as in-use (as they are invalid)
+       for(i = PPN(maxaddrpa); i < npages; i++)
+               pages[i].page_ref = 1;
+}
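
The INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS() call above files each free page onto one list per cache level in addition to the global page_free_list. The macro expansions live in the colored_page_alloc headers; the underlying rule is the usual page-coloring one, sketched here (not the literal macro body) for a power-of-two color count:

    #include <stddef.h>

    /* Which per-color free list a physical page belongs on: pages whose page
     * numbers are congruent modulo the color count hit the same cache sets. */
    static inline size_t page_to_color(size_t ppn, size_t num_colors)
    {
            return ppn & (num_colors - 1);  /* same as ppn % num_colors */
    }

With 8 L2 colors, for example, physical pages 0, 8, 16, ... all land on L2 color 0.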
diff --git a/kern/arch/sparc/pmap.c b/kern/arch/sparc/pmap.c
index a4c1171..0dd7574 100644 (file)
@@ -4,13 +4,12 @@
 
 #include <arch/mmu.h>
 #include <ros/memlayout.h>
-#include <multiboot.h>
 #include <pmap.h>
 #include <string.h>
+#include <kmalloc.h>
 
 physaddr_t boot_cr3;
 pde_t* boot_pgdir;
-char* boot_freemem;
 page_t* pages;
 page_list_t page_free_list;
 
@@ -24,39 +23,11 @@ vm_init(void)
        boot_pgdir = l1_page_table;
        boot_cr3 = PADDR(boot_pgdir);
 
-       size_t page_array_size = ROUNDUP(npage*sizeof(page_t),PGSIZE);
-       pages = (page_t*)boot_alloc(page_array_size,PGSIZE);
-       memset(pages,0,page_array_size);
-
        size_t env_array_size = ROUNDUP(NENV*sizeof(env_t), PGSIZE);
        envs = (env_t *)boot_alloc(env_array_size, PGSIZE);
        memset(envs, 0, env_array_size);
 }
 
-void
-page_init(void)
-{
-       uintptr_t i;
-       physaddr_t physaddr_after_kernel = PADDR(ROUNDUP(boot_freemem,PGSIZE));
-
-       LIST_INIT(&page_free_list);
-
-       // mark [0, physaddr_after_kernel) as in-use
-       for(i = 0; i < PPN(physaddr_after_kernel); i++)
-               pages[i].pp_ref = 1;
-
-       // mark [physaddr_after_kernel, maxaddrpa) as free
-       for(i = PPN(physaddr_after_kernel); i < PPN(maxaddrpa); i++)
-       {
-               pages[i].pp_ref = 0;
-               LIST_INSERT_HEAD(&page_free_list,&pages[i],pp_link);
-       }
-
-       // mark [maxaddrpa, ...) as in-use (as they are invalid)
-       for(i = PPN(maxaddrpa); i < npage; i++)
-               pages[i].pp_ref = 1;
-}
-
 error_t
 pagetable_remove(pde_t* l1pt, void* va)
 {
@@ -87,7 +58,7 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
 
                if(page_alloc(&new_table))
                        return NULL;
-               new_table->pp_ref = 1;
+               page_setref(new_table,1);
                memset(page2kva(new_table),0,PGSIZE);
 
                l2_tables_per_page = PGSIZE/(sizeof(pte_t)*NL2ENTRIES);
@@ -98,7 +69,7 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
                        if(l1pt[l1x_start+i] != 0)
                                continue;
 
-                       new_table->pp_ref++;
+                       page_incref(new_table);
                        pa = page2pa(new_table) + i*sizeof(pte_t)*NL2ENTRIES;
                        l1pt[l1x_start+i] = PTD(pa);
                }
@@ -120,7 +91,7 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
 
                if(page_alloc(&new_table))
                        return NULL;
-               new_table->pp_ref = 1;
+               page_setref(new_table,1);
                memset(page2kva(new_table),0,PGSIZE);
 
                l3_tables_per_page = PGSIZE/(sizeof(pte_t)*NL3ENTRIES);
@@ -131,7 +102,7 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
                        if(l2pt[l2x_start+i] != 0)
                                continue;
 
-                       new_table->pp_ref++;
+                       page_incref(new_table);
                        pa = page2pa(new_table) + i*sizeof(pte_t)*NL3ENTRIES;
                        l2pt[l2x_start+i] = PTD(pa);
                }
@@ -188,13 +159,13 @@ page_check(void)
        }
        assert(PTD_ADDR(boot_pgdir[0]) == page2pa(pp0));
        assert(check_va2pa(boot_pgdir, 0x0) == page2pa(pp1));
-       assert(pp1->pp_ref == 1);
-       assert(pp0->pp_ref == 1);
+       assert(pp1->page_ref == 1);
+       assert(pp0->page_ref == 1);
 
        // should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
        assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
        assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
-       assert(pp2->pp_ref == 1);
+       assert(pp2->page_ref == 1);
 
        // Make sure that pgdir_walk returns a pointer to the pte and
        // not the table or some other garbage
@@ -209,7 +180,7 @@ page_check(void)
        // should be able to map pp2 at PGSIZE because it's already there
        assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, PTE_U) == 0);
        assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
-       assert(pp2->pp_ref == 1);
+       assert(pp2->page_ref == 1);
 
        // Make sure that we actually changed the permission on pp2 when we re-mapped it
        {
@@ -231,8 +202,8 @@ page_check(void)
        assert(check_va2pa(boot_pgdir, 0) == page2pa(pp1));
        assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
        // ... and ref counts should reflect this
-       assert(pp1->pp_ref == 2);
-       assert(pp2->pp_ref == 0);
+       assert(pp1->page_ref == 2);
+       assert(pp2->page_ref == 0);
 
        // pp2 should be returned by page_alloc
        assert(page_alloc(&pp) == 0 && pp == pp2);
@@ -241,15 +212,15 @@ page_check(void)
        page_remove(boot_pgdir, 0x0);
        assert(check_va2pa(boot_pgdir, 0x0) == ~0);
        assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
-       assert(pp1->pp_ref == 1);
-       assert(pp2->pp_ref == 0);
+       assert(pp1->page_ref == 1);
+       assert(pp2->page_ref == 0);
 
        // unmapping pp1 at PGSIZE should free it
        page_remove(boot_pgdir, (void*) PGSIZE);
        assert(check_va2pa(boot_pgdir, 0x0) == ~0);
        assert(check_va2pa(boot_pgdir, PGSIZE) == ~0);
-       assert(pp1->pp_ref == 0);
-       assert(pp2->pp_ref == 0);
+       assert(pp1->page_ref == 0);
+       assert(pp2->page_ref == 0);
 
        // so it should be returned by page_alloc
        assert(page_alloc(&pp) == 0 && pp == pp1);
@@ -260,8 +231,8 @@ page_check(void)
        // forcibly take pp0 back
        assert(PTD_ADDR(boot_pgdir[0]) == page2pa(pp0));
        boot_pgdir[0] = 0;
-       assert(pp0->pp_ref == 1);
-       pp0->pp_ref = 0;
+       assert(pp0->page_ref == 1);
+       pp0->page_ref = 0;
 
        // Catch invalid pointer addition in pgdir_walk - i.e. pgdir + PDX(va)
        {
@@ -275,7 +246,7 @@ page_check(void)
 
          // Clean up again
          boot_pgdir[PDX(va)] = 0;
-         pp0->pp_ref = 0;
+         pp0->page_ref = 0;
        }
 
        // give free list back
diff --git a/kern/arch/sparc/types.h b/kern/arch/sparc/types.h
index beddd75..7e55ca9 100644 (file)
@@ -54,6 +54,22 @@ typedef int32_t ssize_t;
 // off_t is used for file offsets and lengths.
 typedef int32_t off_t;
 
+#define NUM_ADDR_BITS 32
+#define MAX_VADDR     ((uint64_t)(~0) >> (64-NUM_ADDR_BITS))
+
+//Constants for byte sizes
+#define ONE_KILOBYTE  (1L<<10)
+#define ONE_MEGABYTE  (1L<<20)
+#define ONE_GIGABYTE  (1L<<30)
+
+// Return the integer logarithm of the value provided, rounded down (floor)
+static inline uint32_t LOG2(uint32_t value)
+{
+    uint32_t l = 0;
+    while( (value >> l) > 1 ) ++l;
+    return l;
+}
+
 // Efficient min and max operations
 #define MIN(_a, _b)                                            \
 ({                                                             \
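
Two quick checks on the additions to types.h above: LOG2() as written rounds down (a floor), and MAX_VADDR evaluates to 0xFFFFFFFF with NUM_ADDR_BITS == 32. A user-space sanity check built only from the definitions in that hunk:

    #include <assert.h>
    #include <stdint.h>

    #define NUM_ADDR_BITS 32
    #define MAX_VADDR     ((uint64_t)(~0) >> (64-NUM_ADDR_BITS))

    static inline uint32_t LOG2(uint32_t value)
    {
            uint32_t l = 0;
            while ((value >> l) > 1)
                    ++l;
            return l;
    }

    int main(void)
    {
            assert(LOG2(8) == 3);
            assert(LOG2(5) == 2);               /* floor, not ceiling */
            assert(MAX_VADDR == 0xFFFFFFFFull);
            return 0;
    }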
diff --git a/kern/include/colored_page_alloc.h b/kern/include/colored_page_alloc.h
index 91ddbae..4fce9d1 100644 (file)
@@ -27,7 +27,7 @@
 error_t _cache##_page_alloc(page_t** page, size_t color)                      \
 {                                                                             \
        /*      TODO: Put a lock around this */                                       \
-       if(!LIST_EMPTY(&(_cache##_cache_colored_page_list)[(color)])) {           \
+       if(available_caches._cache && !LIST_EMPTY(&(_cache##_cache_colored_page_list)[(color)])) {           \
                *(page) = LIST_FIRST(&(_cache##_cache_colored_page_list)[(color)]);   \
                LIST_REMOVE(*page, global_link);                                      \
                REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page);                     \
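
With the available_caches guard added above, a request against a cache level that cache_init() did not mark available now fails instead of being served from that level's lists. A hypothetical caller (a sketch only; it assumes the declarations in page_alloc.h and that 0 means success, as the existing page_alloc() checks suggest):

    /* Prefer a specific l2 color, else fall back to any free page. */
    static page_t *get_page_preferring_l2_color(size_t color)
    {
            page_t *page;
            if (l2_page_alloc(&page, color) == 0)
                    return page;
            if (page_alloc(&page) == 0)
                    return page;
            return NULL;
    }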
diff --git a/kern/include/page_alloc.h b/kern/include/page_alloc.h
index b60679f..8be20ce 100644 (file)
@@ -42,7 +42,8 @@ error_t l3_page_alloc(page_t *SAFE *page, size_t color);
 error_t page_free(page_t *SAFE page);
 void page_incref(page_t *SAFE page);
 void page_decref(page_t *SAFE page);
-void page_refset(page_t *SAFE page, size_t val);
+size_t page_getref(page_t *SAFE page);
+void page_setref(page_t *SAFE page, size_t val);
 int page_is_free(size_t ppn);
 
 #endif //PAGE_ALLOC_H
diff --git a/kern/src/manager.c b/kern/src/manager.c
index 7f88665..43aee00 100644 (file)
@@ -27,6 +27,8 @@ void manager(void)
        static uint8_t progress = 0;
        env_t *envs[256];
 
+       test_page_coloring();
+
        switch (progress++) {
                case 0:
                        envs[0] = kfs_proc_create(kfs_lookup_path("roslib_hello"));
diff --git a/kern/src/page_alloc.c b/kern/src/page_alloc.c
index f19f1d1..80dd5cb 100644 (file)
@@ -155,3 +155,11 @@ void page_setref(page_t *page, size_t val)
        page->page_ref = val;
 }
 
+/*
+ * Get the reference count on a page
+ */
+size_t page_getref(page_t *page)
+{
+       return page->page_ref;
+}
+
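The new page_getref() completes the accessor set (page_setref()/page_incref()/page_decref()) that the pgdir_walk conversions elsewhere in this commit switch to instead of poking page_ref directly. A sketch of the pattern those hunks now follow when grabbing a fresh page-table page (kernel context, assumes pmap.h and page_alloc.h):

    static page_t *alloc_table_page(void)
    {
            page_t *new_table;
            if (page_alloc(&new_table) != 0)
                    return NULL;
            page_setref(new_table, 1);              /* starts with one reference */
            memset(page2kva(new_table), 0, PGSIZE);
            return new_table;
    }

Each additional directory entry that points into the page then takes page_incref(), and page_getref() lets callers read the count without reaching into page_ref.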
diff --git a/kern/src/pmap.c b/kern/src/pmap.c
index 6ba6330..c292eb0 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <ros/error.h>
 
+#include <kmalloc.h>
 #include <atomic.h>
 #include <string.h>
 #include <assert.h>
 
 static void *DANGEROUS user_mem_check_addr;
 
+// --------------------------------------------------------------
+// Tracking of physical pages.
+// The 'pages' array has one 'page_t' entry per physical page.
+// Pages are reference counted, and free pages are kept on a linked list.
+// --------------------------------------------------------------
+  
+// Initialize page structure and memory free list.
+void page_init(void)
+{
+       // First, make 'pages' point to an array of size 'npages' of
+       // type 'page_t'.
+       // The kernel uses this structure to keep track of physical pages;
+       // 'npages' equals the number of physical pages in memory.
+       // round up to the nearest page
+       size_t page_array_size = ROUNDUP(npages*sizeof(page_t), PGSIZE);
+       pages = (page_t*)boot_alloc(page_array_size, PGSIZE);
+       memset(pages, 0, page_array_size);
+
+       // Now initialize everything so pages can start to be allocated and freed
+       // from the memory free list
+       page_alloc_init();
+}
+
 //
 // Map the physical page 'pp' at virtual address 'va'.
 // The permissions (the low 12 bits) of the page table
diff --git a/kern/src/testing.c b/kern/src/testing.c
index 54f3c37..bc6060c 100644 (file)
@@ -96,7 +96,7 @@ void test_page_coloring(void)
        cprintf("\n");
 
        //Print some stats about our memory
-       cprintf("Max Address: %u\n", MAX_VADDR);
+       cprintf("Max Address: %llu\n", MAX_VADDR);
        cprintf("Num Pages: %u\n", npages);
 
        //Declare a local variable for allocating pages 
diff --git a/scripts/mergedep.pl b/scripts/mergedep.pl
index f942ed1..45b6b2b 100644 (file)
@@ -33,7 +33,7 @@ sub readdeps {
                        my $target = $filename;
                        my $deplines = $2;
                        my $slash = $3;
-                       $target =~ s/\.d/\.o/;
+                       $target =~ s/\.d$/\.o/;
                        while ($slash ne '') {
                                $_ = <DEPFILE>;
                                defined($_) or die