Finished up the page coloring stuff
author: Kevin Klues <klueska@cs.berkeley.edu>
Wed, 28 Oct 2009 08:07:49 +0000 (09:07 +0100)
committer: Kevin Klues <klueska@cs.berkeley.edu>
Wed, 28 Oct 2009 08:24:43 +0000 (09:24 +0100)
There are now functions to alloc/free colors on a process
by process basis, as well as alloc/free pages based on the
colors owned by a process.  See kern/include/colored_pages.h
and kern/include/page_alloc.h for the function definitions.

Also, a new matrix program has been added called run_binary_colored
which takes advantage of the fact that colors can be allocated
to a process.  Run it and try it out.

20 files changed:
kern/arch/i386/pmap.c
kern/arch/i386/smp_boot.c
kern/arch/sparc/pmap.c
kern/include/colored_caches.h
kern/include/env.h
kern/include/page_alloc.h
kern/src/colored_caches.c
kern/src/env.c
kern/src/init.c
kern/src/mm.c
kern/src/page_alloc.c
kern/src/slab.c
kern/src/syscall.c
kern/src/testing.c
user/apps/parlib/Makefrag
user/apps/parlib/matrix.c
user/apps/parlib/run_binary.c
user/apps/parlib/run_binary_colored.c [new file with mode: 0644]
user/parlib/inc/parlib.h
user/parlib/src/syscall.c

index 5c33f2a..157b78e 100644 (file)
@@ -611,7 +611,7 @@ pgdir_walk(pde_t *pgdir, const void *SNT va, int create)
                *the_pde = PTE_PS | PTE_P;
                return (pte_t*)the_pde;
        }
-       if (page_alloc(&new_table))
+       if (kpage_alloc(&new_table))
                return NULL;
        page_setref(new_table,1);
        memset(page2kva(new_table), 0, PGSIZE);
@@ -689,9 +689,12 @@ page_check(void)
 
        // should be able to allocate three pages
        pp0 = pp1 = pp2 = 0;
-       assert(page_alloc(&pp0) == 0);
-       assert(page_alloc(&pp1) == 0);
-       assert(page_alloc(&pp2) == 0);
+       assert(kpage_alloc(&pp0) == 0);
+       assert(kpage_alloc(&pp1) == 0);
+       assert(kpage_alloc(&pp2) == 0);
+       page_setref(pp0, 0);
+       page_setref(pp1, 0);
+       page_setref(pp2, 0);
 
        assert(pp0);
        assert(pp1 && pp1 != pp0);
@@ -704,7 +707,7 @@ page_check(void)
        }
 
        // should be no free memory
-       assert(page_alloc(&pp) == -ENOMEM);
+       assert(kpage_alloc(&pp) == -ENOMEM);
 
        // Fill pp1 with bogus data and check for invalid tlb entries
        memset(page2kva(pp1), 0xFFFFFFFF, PGSIZE);
@@ -742,7 +745,7 @@ page_check(void)
        }
 
        // should be no free memory
-       assert(page_alloc(&pp) == -ENOMEM);
+       assert(kpage_alloc(&pp) == -ENOMEM);
 
        // should be able to map pp2 at PGSIZE because it's already there
        assert(page_insert(boot_pgdir, pp2, (void*SNT) PGSIZE, PTE_U) == 0);
@@ -757,7 +760,7 @@ page_check(void)
 
        // pp2 should NOT be on the free list
        // could happen in ref counts are handled sloppily in page_insert
-       assert(page_alloc(&pp) == -ENOMEM);
+       assert(kpage_alloc(&pp) == -ENOMEM);
 
        // should not be able to map at PTSIZE because need free page for page table
        assert(page_insert(boot_pgdir, pp0, (void*SNT) PTSIZE, 0) < 0);
@@ -773,7 +776,8 @@ page_check(void)
        assert(pp2->page_ref == 0);
 
        // pp2 should be returned by page_alloc
-       assert(page_alloc(&pp) == 0 && pp == pp2);
+       assert(kpage_alloc(&pp) == 0 && pp == pp2);
+       page_setref(pp, 0);
 
        // unmapping pp1 at 0 should keep pp1 at PGSIZE
        page_remove(boot_pgdir, 0x0);
@@ -790,10 +794,11 @@ page_check(void)
        assert(pp2->page_ref == 0);
 
        // so it should be returned by page_alloc
-       assert(page_alloc(&pp) == 0 && pp == pp1);
+       assert(kpage_alloc(&pp) == 0 && pp == pp1);
+       page_setref(pp, 0);
 
        // should be no free memory
-       assert(page_alloc(&pp) == -ENOMEM);
+       assert(kpage_alloc(&pp) == -ENOMEM);
 
        // forcibly take pp0 back
        assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
index 29fb9f3..37c7b4e 100644 (file)
@@ -100,9 +100,8 @@ void smp_boot(void)
        page_insert(boot_pgdir, pa2page(trampoline_pg), (void*SNT)trampoline_pg, PTE_W);
 
        // Allocate a stack for the cores starting up.  One for all, must share
-       if (page_alloc(&smp_stack))
+       if (kpage_alloc(&smp_stack))
                panic("No memory for SMP boot stack!");
-       page_incref(smp_stack);
        smp_stack_top = SINIT((uintptr_t)(page2kva(smp_stack) + PGSIZE));
 
        // Start the IPI process (INIT, wait, SIPI, wait, SIPI, wait)
@@ -204,9 +203,8 @@ uint32_t smp_main(void)
 
        // Get a per-core kernel stack
        page_t *my_stack;
-       if (page_alloc(&my_stack))
+       if (kpage_alloc(&my_stack))
                panic("Unable to alloc a per-core stack!");
-       page_incref(my_stack);
        memset(page2kva(my_stack), 0, PGSIZE);
 
        // Set up a gdt / gdt_pd for this core, stored at the top of the stack
index 297afa3..7215bae 100644 (file)
@@ -60,9 +60,8 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
                // space than needed, so also use it for the adjacent
                // l2_tables_per_page-1 pages (if they're unmapped)
 
-               if(page_alloc(&new_table))
+               if(kpage_alloc(&new_table))
                        return NULL;
-               page_setref(new_table,1);
                memset(page2kva(new_table),0,PGSIZE);
 
                l2_tables_per_page = PGSIZE/(sizeof(pte_t)*NL2ENTRIES);
@@ -93,9 +92,8 @@ pgdir_walk(pde_t* l1pt, const void*SNT va, int create)
                if(!create)
                        return NULL;
 
-               if(page_alloc(&new_table))
+               if(kpage_alloc(&new_table))
                        return NULL;
-               page_setref(new_table,1);
                memset(page2kva(new_table),0,PGSIZE);
 
                l3_tables_per_page = PGSIZE/(sizeof(pte_t)*NL3ENTRIES);
index 3510f95..ee32cf5 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <ros/common.h>
 #include <ros/error.h>
-#include <process.h>
+#include <atomic.h>
 
 /****************** Cache Structures ********************/
 typedef struct Cache {
@@ -42,10 +42,14 @@ void init_free_cache_colors_map(cache_t* c);
 size_t get_page_color(uintptr_t page, cache_t RO*c);
 size_t get_offset_in_cache_line(uintptr_t addr, cache_t RO*c);
 void print_cache_properties(char *NT lstring, cache_t RO*c);
-error_t cache_color_alloc(cache_t* c, struct proc* p);
-error_t cache_color_alloc_specific(size_t color, cache_t* c, struct proc* p);
-void cache_color_free(cache_t* c, struct proc* p);
-void cache_color_free_specific(size_t color, cache_t* c, struct proc* p);
+
+uint8_t* cache_colors_map_alloc();
+void cache_colors_map_free(uint8_t* colors_map);
+error_t cache_color_alloc(cache_t* c, uint8_t* colors_map);
+error_t cache_color_alloc_specific(size_t color, cache_t* c, 
+                                         uint8_t* colors_map);
+void cache_color_free(cache_t* c, uint8_t* colors_map);
+void cache_color_free_specific(size_t color, cache_t* c, uint8_t* colors_map);
 
 /****************** Cache Properties *********************/
 inline size_t get_cache_ways_associative(cache_t RO*c);
index cbba634..3afa118 100644 (file)
@@ -62,6 +62,7 @@ struct Env {
        /* Cache color map: bitmap of the cache colors currently allocated to this
         * process */
        uint8_t* cache_colors_map;
+       size_t next_cache_color;
 
        /* Info about this process's resources (granted, desired) for each type. */
        struct resource resources[MAX_NUM_RESOURCES];
index 95ba04e..8bb0bf4 100644 (file)
@@ -13,6 +13,7 @@
 #include <ros/error.h>
 #include <arch/mmu.h>
 #include <colored_page_alloc.h>
+#include <process.h>
 
 /****************** Page Structures *********************/
 struct Page;
@@ -31,24 +32,30 @@ struct Page {
 
 
 /******** Externally visible global variables ************/
+extern uint8_t* global_cache_colors_map;
 extern spinlock_t colored_page_free_list_lock;
 extern page_list_t LCKD(&colored_page_free_list_lock) * RO CT(llc_num_colors)
     colored_page_free_list;
 
 /*************** Functional Interface *******************/
 void page_alloc_init(void);
-error_t page_alloc(page_t *SAFE *page);
+void colored_page_alloc_init(void);
+
+error_t upage_alloc(struct proc* p, page_t *SAFE *page);
+error_t kpage_alloc(page_t *SAFE *page);
+error_t upage_alloc_specific(struct proc* p, page_t *SAFE *page, size_t ppn);
+error_t kpage_alloc_specific(page_t *SAFE *page, size_t ppn);
+error_t colored_upage_alloc(uint8_t* map, page_t *SAFE *page, size_t color);
+error_t page_free(page_t *SAFE page);
+
 void *get_cont_pages(size_t order, int flags);
 void free_cont_pages(void *buf, size_t order);
-error_t page_alloc_specific(page_t *SAFE *page, size_t ppn);
-error_t l1_page_alloc(page_t *SAFE *page, size_t color);
-error_t l2_page_alloc(page_t *SAFE *page, size_t color);
-error_t l3_page_alloc(page_t *SAFE *page, size_t color);
-error_t page_free(page_t *SAFE page);
+
 void page_incref(page_t *SAFE page);
 void page_decref(page_t *SAFE page);
 size_t page_getref(page_t *SAFE page);
 void page_setref(page_t *SAFE page, size_t val);
+
 int page_is_free(size_t ppn);
 
 #endif //PAGE_ALLOC_H
index 38bcc12..d765151 100644 (file)
@@ -167,7 +167,7 @@ static inline void clr_color_range(uint16_t color, uint8_t* map,
 }
 
 static inline error_t __cache_color_alloc_specific(size_t color, cache_t* c, 
-                                                              struct proc* p) 
+                                                         uint8_t* colors_map) 
 {
        if(!GET_BITMASK_BIT(c->free_colors_map, color))
                return -ENOCACHE;       
@@ -179,12 +179,11 @@ static inline error_t __cache_color_alloc_specific(size_t color, cache_t* c,
        if(l3)
                clr_color_range(color, l3->free_colors_map, c, l3);
 
-       printk("I am here now three...\n");
-       set_color_range(color, p->cache_colors_map, c, llc_cache);
+       set_color_range(color, colors_map, c, llc_cache);
        return ESUCCESS;
 }
 
-static inline error_t __cache_color_alloc(cache_t* c, struct proc* p) 
+static inline error_t __cache_color_alloc(cache_t* c, uint8_t* colors_map) 
 {
        if(BITMASK_IS_CLEAR(c->free_colors_map, c->num_colors))
                return -ENOCACHE;       
@@ -195,18 +194,18 @@ static inline error_t __cache_color_alloc(cache_t* c, struct proc* p)
                        break;
        } while(++color);
 
-       return __cache_color_alloc_specific(color, c, p);       
+       return __cache_color_alloc_specific(color, c, colors_map);      
 }
 
 static inline void __cache_color_free_specific(size_t color, cache_t* c, 
-                                                          struct proc* p) 
+                                                     uint8_t* colors_map) 
 {
        if(GET_BITMASK_BIT(c->free_colors_map, color))
                return;
        else {
                size_t r = llc_cache->num_colors / c->num_colors;
                size_t base = color*r;
-               if(!BITMASK_IS_SET_IN_RANGE(p->cache_colors_map, base, base+r))
+               if(!BITMASK_IS_SET_IN_RANGE(colors_map, base, base+r))
                        return;
        }
 
@@ -217,10 +216,10 @@ static inline void __cache_color_free_specific(size_t color, cache_t* c,
        if(l1)
                set_color_range(color, l1->free_colors_map, c, l1);
 
-       clr_color_range(color, p->cache_colors_map, c, llc_cache);
+       clr_color_range(color, colors_map, c, llc_cache);
 }
 
-static inline void __cache_color_free(cache_t* c, struct proc* p) 
+static inline void __cache_color_free(cache_t* c, uint8_t* colors_map) 
 {
        if(BITMASK_IS_FULL(c->free_colors_map, c->num_colors))
                return; 
@@ -230,41 +229,52 @@ static inline void __cache_color_free(cache_t* c, struct proc* p)
                if(!GET_BITMASK_BIT(c->free_colors_map, color)) {
                        size_t r = llc_cache->num_colors / c->num_colors;
                        size_t base = color*r;
-                       if(BITMASK_IS_SET_IN_RANGE(p->cache_colors_map, base, base+r))
+                       if(BITMASK_IS_SET_IN_RANGE(colors_map, base, base+r))
                                break;
                }
        } while(++color < c->num_colors);
        if(color == c->num_colors)
                return;
 
-       __cache_color_free_specific(color, c, p);       
+       __cache_color_free_specific(color, c, colors_map);      
 }
 
-error_t cache_color_alloc(cache_t* c, struct proc* p) 
+uint8_t* cache_colors_map_alloc() {
+       uint8_t* colors_map = kmalloc(llc_cache->num_colors, 0);
+       if(colors_map)
+               CLR_BITMASK(colors_map, llc_cache->num_colors);
+       return colors_map;
+}
+
+void cache_colors_map_free(uint8_t* colors_map) {
+       kfree(colors_map);
+}
+
+error_t cache_color_alloc(cache_t* c, uint8_t* colors_map) 
 {
        spin_lock_irqsave(&cache_colors_lock);
-       error_t e = __cache_color_alloc(c, p);
+       error_t e = __cache_color_alloc(c, colors_map);
        spin_unlock_irqsave(&cache_colors_lock);
        return e;
 }
-error_t cache_color_alloc_specific(size_t color, cache_t* c, struct proc* p) 
+error_t cache_color_alloc_specific(size_t color, cache_t* c, uint8_t* colors_map) 
 {
        spin_lock_irqsave(&cache_colors_lock);
-       error_t e = __cache_color_alloc_specific(color, c, p);
+       error_t e = __cache_color_alloc_specific(color, c, colors_map);
        spin_unlock_irqsave(&cache_colors_lock);
        return e;
 }
 
-void cache_color_free(cache_t* c, struct proc* p) 
+void cache_color_free(cache_t* c, uint8_t* colors_map) 
 {
        spin_lock_irqsave(&cache_colors_lock);
-       __cache_color_free(c, p);
+       __cache_color_free(c, colors_map);
        spin_unlock_irqsave(&cache_colors_lock);
 }
-void cache_color_free_specific(size_t color, cache_t* c, struct proc* p) 
+void cache_color_free_specific(size_t color, cache_t* c, uint8_t* colors_map) 
 {
        spin_lock_irqsave(&cache_colors_lock);
-       __cache_color_free_specific(color, c, p);
+       __cache_color_free_specific(color, c, colors_map);
        spin_unlock_irqsave(&cache_colors_lock);
 }
 
index 514815a..75fc445 100644 (file)
@@ -136,9 +136,8 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
         * First, allocate a page for the pgdir of this process and up
         * its reference count since this will never be done elsewhere
         */
-       r = page_alloc(&pgdir);
+       r = kpage_alloc(&pgdir);
        if(r < 0) return r;
-       page_incref(pgdir);
 
        /*
         * Next, set up the e->env_pgdir and e->env_cr3 pointers to point
@@ -171,7 +170,7 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
         * procinfo structure into the page table
         */
        for(int i=0; i<PROCINFO_NUM_PAGES; i++) {
-               if(page_alloc(&pginfo[i]) < 0)
+               if(upage_alloc(e, &pginfo[i]) < 0)
                        goto env_setup_vm_error;
                if(page_insert(e->env_pgdir, pginfo[i], (void*SNT)(UINFO + i*PGSIZE),
                               PTE_USER_RO) < 0)
@@ -183,7 +182,7 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
         * procdata structure into the page table
         */
        for(int i=0; i<PROCDATA_NUM_PAGES; i++) {
-               if(page_alloc(&pgdata[i]) < 0)
+               if(upage_alloc(e, &pgdata[i]) < 0)
                        goto env_setup_vm_error;
                if(page_insert(e->env_pgdir, pgdata[i], (void*SNT)(UDATA + i*PGSIZE),
                               PTE_USER_RW) < 0)
@@ -206,7 +205,7 @@ WRITES(e->env_pgdir, e->env_cr3, e->env_procinfo, e->env_procdata)
         * (TODO).  Note the page is alloced only the first time through
         */
        if (!shared_page) {
-               if(page_alloc(&shared_page) < 0)
+               if(upage_alloc(e, &shared_page) < 0)
                        goto env_setup_vm_error;
                // Up it, so it never goes away.  One per user, plus one from page_alloc
                // This is necessary, since it's in the per-process range of memory that
@@ -319,6 +318,10 @@ env_alloc(env_t **newenv_store, envid_t parent_id)
 
     { INITSTRUCT(*e)
 
+       // Set up the default map of where to get cache colors from
+       e->cache_colors_map = global_cache_colors_map;
+       e->next_cache_color = 0;
+
        // Allocate and set up the page directory for this environment.
        if ((r = env_setup_vm(e)) < 0) {
                spin_lock(&freelist_lock);
@@ -344,8 +347,6 @@ env_alloc(env_t **newenv_store, envid_t parent_id)
        e->num_vcores = 0;
        for (int i = 0; i < MAX_NUM_CPUS; i++)
                e->vcoremap[i] = -1;
-       e->cache_colors_map = kmalloc(llc_cache->num_colors, 0);
-       CLR_BITMASK(e->cache_colors_map, llc_cache->num_colors);
        memset(&e->resources, 0, sizeof(e->resources));
 
        memset(&e->env_ancillary_state, 0, sizeof(e->env_ancillary_state));
@@ -414,7 +415,7 @@ env_segment_alloc(env_t *e, void *SNT va, size_t len)
                pte = pgdir_walk(e->env_pgdir, start, 0);
                if (pte && *pte & PTE_P)
                        continue;
-               if ((r = page_alloc(&page)) < 0)
+               if ((r = upage_alloc(e, &page)) < 0)
                        panic("env_segment_alloc: %e", r);
                page_insert(e->env_pgdir, page, start, PTE_USER_RW);
        }
@@ -559,6 +560,13 @@ env_free(env_t *e)
        // All parts of the kernel should have decref'd before env_free was called.
        assert(e->env_refcnt == 0);
 
+       // Free any colors allocated to this process
+       if(e->cache_colors_map != global_cache_colors_map) {
+               for(int i=0; i<llc_cache->num_colors; i++)
+                       cache_color_free(llc_cache, e->cache_colors_map);
+               cache_colors_map_free(e->cache_colors_map);
+       }
+
        // Flush all mapped pages in the user portion of the address space
        env_user_mem_free(e);
 
@@ -568,9 +576,6 @@ env_free(env_t *e)
        e->env_cr3 = 0;
        page_decref(pa2page(pa));
 
-       //Free any memory allocated by this process
-       kfree(e->cache_colors_map);
-
        // return the environment to the free list
        e->state = ENV_FREE;
        spin_lock(&freelist_lock);
index 6da3b6e..8ceff5c 100644 (file)
@@ -66,10 +66,11 @@ void kernel_init(multiboot_info_t *mboot_info)
 
        cache_init();
        page_init();
-       page_check();
        kmem_cache_init();
        kmalloc_init();
-       //cache_color_alloc_init();
+       cache_color_alloc_init();
+       colored_page_alloc_init();
+       page_check();
 
        idt_init();
        sysenter_init();
index f26cc83..b5d543e 100644 (file)
@@ -54,7 +54,7 @@ void *mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
        }
        page_t *a_page;
        for (int i = 0; i < num_pages; i++) {
-               if (page_alloc(&a_page))
+               if (upage_alloc(p, &a_page))
                        goto mmap_abort;
                // TODO: give them the permissions they actually want
                if (page_insert(p->env_pgdir, a_page, (void*SNT)addr + i*PGSIZE,
index ed80e07..480e276 100644 (file)
 #endif
 
 #include <sys/queue.h>
+#include <arch/bitmask.h>
 #include <page_alloc.h>
 #include <pmap.h>
 #include <string.h>
+#include <kmalloc.h>
 
 #define l1 (available_caches.l1)
 #define l2 (available_caches.l2)
@@ -24,7 +26,19 @@ static error_t __page_alloc_specific(page_t** page, size_t ppn);
 static error_t __page_free(page_t* page);
 
 // Global list of colors allocated to the general purpose memory allocator
-static uint8_t* global_colors_map;
+uint8_t* global_cache_colors_map;
+
+void colored_page_alloc_init()
+{
+       global_cache_colors_map = 
+              kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
+       CLR_BITMASK(global_cache_colors_map, llc_cache->num_colors);
+       cache_color_alloc(llc_cache, global_cache_colors_map);
+       cache_color_alloc(llc_cache, global_cache_colors_map);
+       cache_color_alloc(llc_cache, global_cache_colors_map);
+       cache_color_alloc(llc_cache, global_cache_colors_map);
+       cache_color_alloc(llc_cache, global_cache_colors_map);
+}
 
 /**
  * @brief Clear a Page structure.
@@ -32,34 +46,64 @@ static uint8_t* global_colors_map;
  * The result has null links and 0 refcount.
  * Note that the corresponding physical page is NOT initialized!
  */
-static void page_clear(page_t *SAFE page)
+static void __page_clear(page_t *SAFE page)
 {
        memset(page, 0, sizeof(page_t));
 }
 
-error_t page_alloc_from_color_range(page_t** page,  
-                                    uint16_t base_color,
-                                    uint16_t range) {
-
-       // Find first available color with pages available
-    //  in the proper range
-       int i = base_color;
-       spin_lock_irqsave(&colored_page_free_list_lock);
-       //for(i; i < (base_color+range); i++) {
-       for (i; i < (base_color+range); i++) {
-               if(!LIST_EMPTY(&colored_page_free_list[i]))
-                       break;
-       }
-       // Alocate a page from that color
-       if(i < (base_color+range)) {
-               *page = LIST_FIRST(&colored_page_free_list[i]);
-               LIST_REMOVE(*page, page_link);
-               page_clear(*page);
-               spin_unlock_irqsave(&colored_page_free_list_lock);
-               return ESUCCESS;
-       }
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+#define __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, predicate) \
+       /* Find first available color with pages available */                   \
+    /* in the given range */                                                \
+       int i = base_color;                                                     \
+       for (i; i < (base_color+range); i++) {                                  \
+               if((predicate))                                                     \
+                       break;                                                          \
+       }                                                                       \
+       /* Allocate a page from that color */                                   \
+       if(i < (base_color+range)) {                                            \
+               *page = LIST_FIRST(&colored_page_free_list[i]);                     \
+               LIST_REMOVE(*page, page_link);                                      \
+               __page_clear(*page);                                                \
+               return i;                                                           \
+       }                                                                       \
        return -ENOMEM;
+
+static ssize_t __page_alloc_from_color_range(page_t** page,  
+                                           uint16_t base_color,
+                                           uint16_t range) 
+{
+       __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, 
+                        !LIST_EMPTY(&colored_page_free_list[i]));
+}
+
+static ssize_t __page_alloc_from_color_map_range(page_t** page, uint8_t* map, 
+                                              size_t base_color, size_t range)
+{  
+       __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, 
+                   GET_BITMASK_BIT(map, i) && !LIST_EMPTY(&colored_page_free_list[i]))
+}
+
+static ssize_t __colored_page_alloc(uint8_t* map, page_t** page, 
+                                               size_t next_color)
+{
+       ssize_t ret;
+       if((ret = __page_alloc_from_color_map_range(page, map, 
+                                  next_color, llc_cache->num_colors)) < 0)
+               ret = __page_alloc_from_color_map_range(page, map, 0, next_color);
+       return ret;
+}
+
+/* Internal version of page_alloc_specific.  Grab the lock first. */
+static error_t __page_alloc_specific(page_t** page, size_t ppn)
+{
+       page_t* sp_page = ppn2page(ppn);
+       if( sp_page->page_ref != 0 )
+               return -ENOMEM;
+       *page = sp_page;
+       LIST_REMOVE(*page, page_link);
+
+       __page_clear(*page);
+       return 0;
 }
 
 /**
@@ -74,25 +118,35 @@ error_t page_alloc_from_color_range(page_t** page,
  * @return ESUCCESS on success
  * @return -ENOMEM  otherwise
  */
-error_t page_alloc(page_t** page) 
+error_t upage_alloc(struct proc* p, page_t** page)
+{
+       spin_lock_irqsave(&colored_page_free_list_lock);
+       ssize_t ret = __colored_page_alloc(p->cache_colors_map, 
+                                            page, p->next_cache_color);
+       spin_unlock_irqsave(&colored_page_free_list_lock);
+
+       if(ret >= 0)
+               p->next_cache_color = ret;
+       return ret;
+}
+
+error_t kpage_alloc(page_t** page) 
 {
        static size_t next_color = 0;
-       error_t e;
-       for(int i=next_color; i<llc_cache->num_colors; i++) {
-               e = page_alloc_from_color_range(page, i, 1);
-               if(e == ESUCCESS) {
-                       next_color = i+1;
-                       return e;
-               }
-       }
-       for(int i=0; i<next_color; i++) {
-               e = page_alloc_from_color_range(page, i, 1);
-               if(e == ESUCCESS) {
-                       next_color = i+1;
-                       return e;
-               }
+       ssize_t ret;
+       spin_lock_irqsave(&colored_page_free_list_lock);
+       if((ret = __page_alloc_from_color_range(page, next_color, 
+                                   llc_cache->num_colors)) < 0)
+               ret = __page_alloc_from_color_range(page, 0, next_color);
+
+       if(ret >= 0) {
+               next_color = ret;        
+               page_incref(*page);
+               ret = ESUCCESS;
        }
-       return -ENOMEM;
+       spin_unlock_irqsave(&colored_page_free_list_lock);
+       
+       return ret;
 }
 
 /**
@@ -152,18 +206,11 @@ void free_cont_pages(void *buf, size_t order)
 }
 
 /*
- * This macro defines multiple functions of the form:
- * error_t _cache##_page_alloc(page_t** page, size_t color)
- *
- * Each of these functions operates on a different level of 
- * of the cache heirarchy, and allocates a physical page
- * from the list of pages corresponding to the supplied 
- * color for the given cache.  
- * 
+ * Allocates a specific physical page.
  * Does NOT set the contents of the physical page to zero -
  * the caller must do that if necessary.
  *
- * color       -- the color from which to allocate a page
+ * ppn         -- the page number to allocate
  * *page       -- is set to point to the Page struct 
  *                of the newly allocated page
  *
@@ -171,69 +218,19 @@ void free_cont_pages(void *buf, size_t order)
  *   ESUCCESS  -- on success
  *   -ENOMEM   -- otherwise 
  */
-error_t l1_page_alloc(page_t** page, size_t color)
+error_t upage_alloc_specific(struct proc* p, page_t** page, size_t ppn)
 {
-       if(l1)
-       {
-               uint16_t range = llc_cache->num_colors / get_cache_num_page_colors(l1);
-               uint16_t base_color = color*range;
-               return page_alloc_from_color_range(page, base_color, range);
-       }
-       return -ENOCACHE;
-}
-
-error_t l2_page_alloc(page_t** page, size_t color)
-{
-       if(l2)
-       {
-               uint16_t range = llc_cache->num_colors / get_cache_num_page_colors(l2);
-               uint16_t base_color = color*range;
-               return page_alloc_from_color_range(page, base_color, range);
-       }
-       return -ENOCACHE;
-}
-
-error_t l3_page_alloc(page_t** page, size_t color)
-{
-       if(l3)
-       {
-               uint16_t range = llc_cache->num_colors / get_cache_num_page_colors(l3);
-               uint16_t base_color = color*range;
-               return page_alloc_from_color_range(page, base_color, range);
-       }
-       return -ENOCACHE;
-}
-
-/* Internal version of page_alloc_specific.  Grab the lock first. */
-static error_t __page_alloc_specific(page_t** page, size_t ppn)
-{
-       page_t* sp_page = ppn2page(ppn);
-       if( sp_page->page_ref != 0 )
-               return -ENOMEM;
-       *page = sp_page;
-       LIST_REMOVE(*page, page_link);
-
-       page_clear(*page);
+       spin_lock_irqsave(&colored_page_free_list_lock);
+       __page_alloc_specific(page, ppn);
+       spin_unlock_irqsave(&colored_page_free_list_lock);
        return 0;
 }
 
-/*
- * Allocates a specific physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
- *
- * ppn         -- the page number to allocate
- * *page       -- is set to point to the Page struct 
- *                of the newly allocated page
- *
- * RETURNS 
- *   ESUCCESS  -- on success
- *   -ENOMEM   -- otherwise 
- */
-error_t page_alloc_specific(page_t** page, size_t ppn)
+error_t kpage_alloc_specific(page_t** page, size_t ppn)
 {
        spin_lock_irqsave(&colored_page_free_list_lock);
        __page_alloc_specific(page, ppn);
+       page_incref(*page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return 0;
 }
@@ -245,7 +242,7 @@ error_t page_alloc_specific(page_t** page, size_t ppn)
  */
 static error_t __page_free(page_t* page) 
 {
-       page_clear(page);
+       __page_clear(page);
 
        LIST_INSERT_HEAD(
           &(colored_page_free_list[get_page_color(page2ppn(page), llc_cache)]),
index e6d1ab4..73a618e 100644 (file)
@@ -240,9 +240,8 @@ void kmem_cache_grow(struct kmem_cache *cp)
        if (cp->obj_size <= SLAB_LARGE_CUTOFF) {
                // Just get a single page for small slabs
                page_t *a_page;
-               if (page_alloc(&a_page))
+               if (kpage_alloc(&a_page))
                        panic("[German Accent]: OOM!!!");
-               page_incref(a_page);
                // the slab struct is stored at the end of the page
                a_slab = (struct kmem_slab*)(page2kva(a_page) + PGSIZE -
                                             sizeof(struct kmem_slab));
index 347fb07..07bab93 100644 (file)
@@ -23,6 +23,8 @@
 #include <kmalloc.h>
 #include <stdio.h>
 #include <resource.h>
+#include <colored_caches.h>
+#include <arch/bitmask.h>
 #include <kfs.h> // eventually replace this with vfs.h
 
 #ifdef __sparc_v8__
@@ -81,22 +83,27 @@ static ssize_t sys_serial_read(env_t* e, char *DANGEROUS _buf, size_t len)
 //
 
 static ssize_t sys_run_binary(env_t* e, void *DANGEROUS binary_buf,
-                              void*DANGEROUS arg, size_t len) {
+                  void*DANGEROUS arg, size_t len, size_t num_colors)
+{
        uint8_t* new_binary = kmalloc(len, 0);
        if(new_binary == NULL)
                return -ENOMEM;
-       if(memcpy_from_user(e, new_binary, binary_buf, len))
-       {
+       if(memcpy_from_user(e, new_binary, binary_buf, len)) {
                kfree(new_binary);
                proc_destroy(e);
                return 0;
        }
+       kfree(new_binary);
 
        env_t* env = env_create(new_binary, len);
-       kfree(new_binary);
+       if(num_colors > 0) {
+               env->cache_colors_map = cache_colors_map_alloc();
+               for(int i=0; i<num_colors; i++)
+                       cache_color_alloc(llc_cache, env->cache_colors_map);
+       }
        proc_set_state(env, PROC_RUNNABLE_S);
        schedule_proc(env);
-       proc_yield(e); // changed from sys_yield.  did not test this at all.
+       proc_yield(e);
        return 0;
 }
 
@@ -190,17 +197,17 @@ static ssize_t sys_shared_page_alloc(env_t* p1,
                                                       PTE_USER_RW);
        page_t* page;
        env_t* p2 = &(envs[ENVX(p2_id)]);
-       error_t e = page_alloc(&page);
+       error_t e = upage_alloc(p1, &page);
 
        if(e < 0) return e;
 
        void* p2_addr = page_insert_in_range(p2->env_pgdir, page,
-                                            (void*SNT)UTEXT, (void*SNT)UTOP, p2_flags);
+                       (void*SNT)UTEXT, (void*SNT)UTOP, p2_flags);
        if(p2_addr == NULL)
                return -EFAIL;
 
        void* p1_addr = page_insert_in_range(p1->env_pgdir, page,
-                                           (void*SNT)UTEXT, (void*SNT)UTOP, p1_flags);
+                       (void*SNT)UTEXT, (void*SNT)UTOP, p1_flags);
        if(p1_addr == NULL) {
                page_remove(p2->env_pgdir, p2_addr);
                return -EFAIL;
@@ -265,7 +272,7 @@ static void sys_cache_buster(env_t* e, uint32_t num_writes, uint32_t num_pages,
        if (num_pages) {
                spin_lock(&buster_lock);
                for (int i = 0; i < MIN(num_pages, MAX_PAGES); i++) {
-                       page_alloc(&a_page[i]);
+                       upage_alloc(e, &a_page[i]);
                        page_insert(e->env_pgdir, a_page[i], (void*)INSERT_ADDR + PGSIZE*i,
                                    PTE_USER_RW);
                }
@@ -534,8 +541,8 @@ intreg_t syscall(struct proc *p, trapframe_t *tf, uintreg_t syscallno,
                        return sys_serial_read(p, (char *DANGEROUS)a1, (size_t)a2);
        #endif
                case SYS_run_binary:
-                       return sys_run_binary(p, (char *DANGEROUS)a1,
-                                             (char* DANGEROUS)a2, (size_t)a3);
+                       return sys_run_binary(p, (char *DANGEROUS)a1, (char* DANGEROUS)a2, 
+                                                                  (size_t)a3, (size_t)a4);
        #ifdef __NETWORK__
                case SYS_eth_write:
                        return sys_eth_write(p, (char *DANGEROUS)a1, (size_t)a2);
index 2fa3efa..a450fae 100644 (file)
@@ -116,6 +116,7 @@ void test_print_info(void)
 
 void test_page_coloring(void) 
 {
+/*
        //Print the different cache properties of our machine
        print_cache_properties("L1", l1);
        cprintf("\n");
@@ -143,7 +144,7 @@ void test_page_coloring(void)
        cprintf("Allocating from L1 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(l1); i++) {
                cprintf("  COLOR %d:\n", i);
-               while(l1_page_alloc(&page, i) != -ENOMEM)
+               while(colored_page_alloc(l1, &page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }
 
@@ -154,7 +155,7 @@ void test_page_coloring(void)
        cprintf("Allocating from L2 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(l2); i++) {
                cprintf("  COLOR %d:\n", i);
-               while(l2_page_alloc(&page, i) != -ENOMEM)
+               while(colored_page_alloc(l2, &page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }
 
@@ -165,7 +166,7 @@ void test_page_coloring(void)
        cprintf("Allocating from L3 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(l3); i++) {
                cprintf("  COLOR %d:\n", i);
-               while(l3_page_alloc(&page, i) != -ENOMEM)
+               while(colored_page_alloc(l3, &page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }
        
@@ -174,10 +175,10 @@ void test_page_coloring(void)
        
        //Run through and allocate all pages through page_alloc
        cprintf("Allocating from global allocator:\n");
-       while(page_alloc(&page) != -ENOMEM)
+       while(upage_alloc(&page) != -ENOMEM)
                cprintf("    Page: %d\n", page2ppn(page));
        
-       if(l2_page_alloc(&page, 0) != -ENOMEM)
+       if(colored_page_alloc(l2, &page, 0) != -ENOMEM)
                cprintf("Should not get here, all pages should already be gone!\n");
        cprintf("All pages gone for sure...\n");
        
@@ -189,36 +190,41 @@ void test_page_coloring(void)
        page_free(&pages[6]);
        page_free(&pages[4]);
 
-       while(page_alloc(&page) != -ENOMEM)
+       while(upage_alloc(&page) != -ENOMEM)
                cprintf("Page: %d\n", page2ppn(page));  
+       
+       page_init();
+*/
 }
 
 void test_color_alloc() {
        size_t checkpoint = 0;
-       struct proc* p = kfs_proc_create(kfs_lookup_path("parlib_matrix"));
-       cache_color_alloc(l2, p);
-       cache_color_alloc(l3, p);
-       cache_color_alloc(l3, p);
-       cache_color_alloc(l2, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(l2, p);
-       cache_color_free(llc_cache, p);
-       cache_color_free(llc_cache, p);
+       uint8_t* colors_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
+       cache_color_alloc(l2, colors_map);
+       cache_color_alloc(l3, colors_map);
+       cache_color_alloc(l3, colors_map);
+       cache_color_alloc(l2, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(l2, colors_map);
+       cache_color_free(llc_cache, colors_map);
+       cache_color_free(llc_cache, colors_map);
+
+       /* Dump the resulting free-color maps for each cache level */
        printk("L1 free colors, tot colors: %d\n", l1->num_colors);
        PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
        printk("L2 free colors, tot colors: %d\n", l2->num_colors);
@@ -226,7 +232,7 @@ void test_color_alloc() {
        printk("L3 free colors, tot colors: %d\n", l3->num_colors);
        PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
        printk("Process allocated colors\n");
-       PRINT_BITMASK(p->cache_colors_map, llc_cache->num_colors);
+       PRINT_BITMASK(colors_map, llc_cache->num_colors);
        printk("test_color_alloc() complete!\n");
 }
 
index 29ce67d..c05e870 100644 (file)
@@ -26,6 +26,7 @@ USER_APPS_PARLIB_LDOBJS    := $(OBJDIR)/$(USER_PARLIB_ARCH_SRC_DIR)/entry.o \
                               $(OBJDIR)/$(USER_APPS_PARLIB_DIR)/clrscrn.o \
                               $(OBJDIR)/$(USER_APPS_PARLIB_DIR)/draw_nanwan.o \
                               $(OBJDIR)/$(USER_APPS_PARLIB_DIR)/run_binary.o \
+                              $(OBJDIR)/$(USER_APPS_PARLIB_DIR)/run_binary_colored.o \
                               $(OBJDIR)/$(USER_APPS_PARLIB_DIR)/change_user.o  
 
 
index 1aa741f..3513ab3 100644 (file)
@@ -12,6 +12,7 @@ extern void set_default_user();
 extern void file_io();
 extern void file_error();
 extern void run_binary();
+extern void run_binary_colored();
 extern char prompt[256];
 
 void help() {
@@ -22,6 +23,7 @@ void help() {
            "  file_io:          Run File Related IO Tests\n"
            "  file_error:       Run File Error Related Tests\n"
            "  run_binary:       Load and run a binary located on the remote server\n"
+           "  run_binary_colored: Load and run a binary located on the remote server with a specified number of page colors\n"
              );
 }
 
@@ -49,6 +51,8 @@ int main(int argc, char** argv)
                        file_error();
                else if (strcmp(s, "run_binary") == 0)
                        run_binary();
+               else if (strcmp(s, "run_binary_colored") == 0)
+                       run_binary_colored();
                else
                        help(); 
 
index 5f4e159..ebf0e21 100644 (file)
@@ -63,7 +63,7 @@ void run_binary()
                if(bytes_read < 0) { read_error(binary_buf, fd); return; }
        }
        printf("Loading Binary: %s, ROMSIZE: %d\n", readline_result, total_bytes_read);
-       ssize_t error = sys_run_binary(binary_buf, NULL, total_bytes_read);
+       ssize_t error = sys_run_binary(binary_buf, NULL, total_bytes_read, 0);
        if(error < 0) {
                fprintf(stderr, "Error: Unable to run remote binary\n");
        }
diff --git a/user/apps/parlib/run_binary_colored.c b/user/apps/parlib/run_binary_colored.c
new file mode 100644 (file)
index 0000000..0055e3a
--- /dev/null
@@ -0,0 +1,85 @@
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <parlib.h>
+
+extern char * readline(const char *prompt);
+
+#define READ_SIZE       1024
+uint8_t* binary_buf;
+
+static void fd_error() {
+       fprintf(stderr, "Error: Unable to run remote binary (fd error): %s\n", 
+                                                             strerror(errno));
+}
+
+static void malloc_error() {
+       fprintf(stderr, 
+               "Error: Unable to run remote binary: No more memory available!\n");
+}
+
+static void read_error(void* buf, int fd) {
+       free(binary_buf);
+       close(fd);
+       fprintf(stderr, "Error: Unable to run remote binary (read error): %s\n", 
+                                                               strerror(errno));
+}
+
+static void realloc_error(void* buf, int fd) {
+       free(binary_buf);
+       close(fd);
+       fprintf(stderr, 
+               "Error: Unable to run remote binary: No more memory available!\n");
+}
+
+void run_binary_colored()
+{      
+       char* name = readline("\nEnter name of binary to execute: ");
+       if (name == NULL) {
+               printf("Error reading from console.\n");
+               return;
+       }
+       char * file_name = malloc(strlen(name) + 8);
+       sprintf(file_name, "./apps/%s", name);
+       int fd = open(file_name, O_RDONLY, 0);
+       free(file_name);
+       if(fd < 0) { fd_error(); return; };
+
+       char* colors = readline("\nEnter number of colors: ");
+       if (colors == NULL) {
+               printf("Error reading from console.\n");
+               close(fd); return;
+       }
+       size_t num_colors = atoi(colors);
+       
+       int iters = 1;
+       binary_buf = malloc(READ_SIZE);
+       if(binary_buf == NULL) { malloc_error(); return; }
+       
+       int total_bytes_read = 0;
+       int bytes_read = read(fd, binary_buf, READ_SIZE);
+       if(bytes_read < 0) { read_error(binary_buf, fd); return; }
+       
+       while(bytes_read > 0) {
+               total_bytes_read += bytes_read; 
+               void* temp_buf = realloc(binary_buf, READ_SIZE*(++iters));
+               if(temp_buf == NULL) { realloc_error(binary_buf, fd); return; } 
+               binary_buf = temp_buf;
+               bytes_read = read(fd, binary_buf+total_bytes_read, READ_SIZE);
+               if(bytes_read < 0) { read_error(binary_buf, fd); return; }
+       }
+       printf("Loading Binary: %s, ROMSIZE: %d\n", name, total_bytes_read);
+       ssize_t error = sys_run_binary(binary_buf, NULL, 
+                           total_bytes_read, num_colors);
+       if(error < 0) {
+               fprintf(stderr, "Error: Unable to run remote binary\n");
+       }
+       free(binary_buf);
+       close(fd);
+}
+
index 050135d..7fbc6df 100644 (file)
@@ -32,7 +32,8 @@ ssize_t     sys_serial_write(void* buf, size_t len);
 ssize_t     sys_serial_read(void* buf, size_t len);
 ssize_t     sys_eth_write(void *COUNT(len) buf, size_t len); 
 ssize_t     sys_eth_read(void *COUNT(len) buf, size_t len);
-ssize_t     sys_run_binary(void* binary_buf, void* arg, size_t len);
+ssize_t     sys_run_binary(void* binary_buf, void* arg, size_t len, 
+                                                 size_t num_colors);
 int         sys_getpid(void);
 size_t      sys_getcpuid(void);
 error_t     sys_brk(void* addr);
index 2e12367..87dc89c 100644 (file)
@@ -58,9 +58,11 @@ ssize_t sys_serial_read(void* buf, size_t len)
 }
 
 //Run a binary loaded at the specificed address with the specified arguments
-ssize_t sys_run_binary(void* binary_buf, void* arg, size_t len) 
+ssize_t sys_run_binary(void* binary_buf, void* arg, size_t len, 
+                                              size_t num_colors) 
 {
-       return syscall(SYS_run_binary, (intreg_t)binary_buf, (intreg_t)arg, len, 0, 0);
+       return syscall(SYS_run_binary, (intreg_t)binary_buf, (intreg_t)arg, len, 
+                                                                 num_colors, 0);
 }
 
 //Write a buffer over ethernet