Remove page coloring
author     Barret Rhoden <brho@cs.berkeley.edu>
           Thu, 18 Aug 2016 16:02:22 +0000 (12:02 -0400)
committer  Barret Rhoden <brho@cs.berkeley.edu>
           Tue, 29 Nov 2016 16:27:40 +0000 (11:27 -0500)
Page coloring doesn't work with contiguous memory allocators, and it
partitions every level of the cache hierarchy at once, which doesn't fit
well with spatial partitioning.  For instance, if we partition the L3 into
8 colors (the exact number depends on the cache properties), we might also
be partitioning the L1 and L2 into two colors each (again, depending on
their properties).  Although we now have cache isolation in the shared LLC,
we are also partitioning caches that are already per-core.
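
For reference, a minimal sketch of the color arithmetic the removed code
relied on (function names here are illustrative; the deleted
colored_caches.c computed the same values via get_cache_num_page_colors()
and get_page_color()):

	#include <stdint.h>
	#include <stdio.h>

	#define PGSIZE 4096

	/* A physically-indexed cache provides (bytes per way) / PGSIZE page
	 * colors: pages of the same color compete for the same cache sets. */
	static size_t nr_page_colors(size_t size_kb, size_t ways)
	{
		return (size_kb * 1024 / ways) / PGSIZE;
	}

	/* Color of a physical page number (color counts are powers of two). */
	static size_t page_color(uintptr_t ppn, size_t nr_colors)
	{
		return ppn & (nr_colors - 1);
	}

	int main(void)
	{
		/* Core i7-ish numbers from the deleted cache_init(). */
		printf("L1: %zu colors\n", nr_page_colors(32, 8));
		printf("L2: %zu colors\n", nr_page_colors(256, 8));
		printf("L3: %zu colors\n", nr_page_colors(8192, 16));
		/* Fixing a page's L3 color also fixes its color in the smaller,
		 * per-core L1/L2, since each level masks the same low PPN bits. */
		printf("ppn 0x12345: L3 color %zu, L2 color %zu\n",
		       page_color(0x12345, nr_page_colors(8192, 16)),
		       page_color(0x12345, nr_page_colors(256, 8)));
		return 0;
	}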

The better approach is to use some sort of hardware support, such as
Intel's Cache Allocation Technology.
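
A rough sketch of the CAT direction (not part of this commit; the MSR
numbers are from Intel's SDM, and write_msr() plus the fixed-width types
are assumed to be the kernel's usual helpers):

	#include <stdint.h>

	/* Assumed kernel wrmsr wrapper. */
	extern void write_msr(uint32_t msr, uint64_t val);

	/* CAT partitions the LLC by ways rather than by page color.
	 * IA32_L3_QOS_MASK_n (0xc90 + n) holds a contiguous capacity bitmask
	 * of L3 ways for class-of-service n; IA32_PQR_ASSOC (0xc8f) selects
	 * the CLOS for the current core in bits 63:32. */
	#define MSR_IA32_PQR_ASSOC	0x0c8f
	#define MSR_IA32_L3_QOS_MASK_0	0x0c90

	/* Give class-of-service 'clos' the L3 ways set in 'way_mask'. */
	static void cat_set_clos_mask(unsigned int clos, uint32_t way_mask)
	{
		write_msr(MSR_IA32_L3_QOS_MASK_0 + clos, way_mask);
	}

	/* Point the calling core at 'clos' (RMID in the low bits left 0). */
	static void cat_assign_this_core(unsigned int clos)
	{
		write_msr(MSR_IA32_PQR_ASSOC, (uint64_t)clos << 32);
	}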

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
24 files changed:
Kconfig
config-default
kern/arch/riscv/Kbuild
kern/arch/riscv/colored_caches.c [deleted file]
kern/arch/riscv/colored_page_alloc.h [deleted file]
kern/arch/riscv/page_alloc.c
kern/arch/x86/Kbuild
kern/arch/x86/colored_caches.c [deleted file]
kern/arch/x86/colored_page_alloc.h [deleted file]
kern/arch/x86/page_alloc.c
kern/include/colored_caches.h [deleted file]
kern/include/colored_page_alloc.h [deleted file]
kern/include/env.h
kern/include/linux/compat_todo.h
kern/include/page_alloc.h
kern/src/Kbuild
kern/src/colored_caches.c [deleted file]
kern/src/init.c
kern/src/ktest/pb_ktests.c
kern/src/manager.c
kern/src/monitor.c
kern/src/page_alloc.c
kern/src/process.c
kern/src/syscall.c

diff --git a/Kconfig b/Kconfig
index 5844844..3635aef 100644 (file)
--- a/Kconfig
+++ b/Kconfig
@@ -95,25 +95,6 @@ config COREALLOC_FCFS
 
 endchoice
 
-menu "Memory Management"
-
-config PAGE_COLORING
-       bool "Page coloring"
-       default n
-       help
-               Turns on internal support for page coloring.  When turned off, the page
-               allocator acts like there is only one color.  Hasn't been used in a few
-               years.
-
-config BOXBORO
-       depends on PAGE_COLORING
-       bool "Boxboro-specific page coloring"
-       default n
-       help
-               Say 'n'.
-
-endmenu
-
 menu "Kernel Debugging"
 
 menu "Per-cpu Tracers"
diff --git a/config-default b/config-default
index 4aba9d8..f69b692 100644 (file)
@@ -36,12 +36,6 @@ CONFIG_KFS_CPIO_BIN=""
 # CONFIG_EXT2FS is not set
 
 #
-# Memory Management
-#
-# CONFIG_DEMAND_PAGING is not set
-# CONFIG_PAGE_COLORING is not set
-
-#
 # Kernel Debugging
 #
 # CONFIG_SPINLOCK_DEBUG is not set
diff --git a/kern/arch/riscv/Kbuild b/kern/arch/riscv/Kbuild
index 298a172..395577f 100644 (file)
@@ -1,6 +1,5 @@
 obj-y                                          += boot.o
 obj-y                                          += cboot.o
-obj-y                                          += colored_caches.o
 obj-y                                          += console.o
 obj-y                                          += cpuinfo.o
 obj-y                                          += entry.o
diff --git a/kern/arch/riscv/colored_caches.c b/kern/arch/riscv/colored_caches.c
deleted file mode 100644 (file)
index ce4a714..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- *
- * Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#include <colored_caches.h>
-#include <stdio.h>
-
-// Global variables
-static cache_t l1,l2,l3;
-cache_t* llc_cache;
-available_caches_t available_caches;
-
-/************** Cache Related Functions  *****************/
-void cache_init()
-{
-       // Initialize the caches available on this system.
-       // TODO: Should call out to something reading the acpi tables from
-       // memory, or something similar.  For now, just initialize them inline
-       available_caches.l1 = &l1;
-       available_caches.l2 = &l2;
-       available_caches.l3 = &l3;
-       llc_cache = &l3;
-       init_cache_properties(&l1,   32,  8, 64);
-       init_cache_properties(&l2,  256,  8, 64);
-       init_cache_properties(&l3, 8192, 16, 64);
-       printk("Cache init successful\n");
-}
-
-void cache_color_alloc_init()
-{
-       init_free_cache_colors_map(&l1);
-       init_free_cache_colors_map(&l2);
-       init_free_cache_colors_map(&l3);
-}
-
diff --git a/kern/arch/riscv/colored_page_alloc.h b/kern/arch/riscv/colored_page_alloc.h
deleted file mode 100644 (file)
index 0b629f1..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- */
-/**
- * @author Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#pragma once
-
-/********** Page Coloring Related Macros ************/
-// Define these to make sure that each level of the cache
-// is initialized and managed properly
-#define DECLARE_CACHE_COLORED_PAGE_LINKS()                    \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l1)                       \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l2)                       \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l3)
-
-#define DECLARE_CACHE_COLORED_PAGE_FREE_LISTS()               \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l1)                  \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l2)                  \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LISTS()        \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l1)           \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l2)           \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTIONS()          \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l1)             \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l2)             \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l3)
-
-#define INIT_CACHE_COLORED_PAGE_FREE_LISTS()                  \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l1)                     \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l2)                     \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page)      \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l1)       \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l2)       \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l3)
-
-#define INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS(page)      \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l1)       \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l2)       \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l3)
diff --git a/kern/arch/riscv/page_alloc.c b/kern/arch/riscv/page_alloc.c
index bd997b0..71720c5 100644 (file)
 #include <pmap.h>
 #include <kmalloc.h>
 #include <multiboot.h>
-#include <colored_caches.h>
-
-page_list_t* colored_page_free_list = NULL;
-spinlock_t colored_page_free_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 /*
  * Initialize the memory free lists.
@@ -25,13 +21,6 @@ void page_alloc_init(struct multiboot_info *mbi)
 {
        init_once_racy(return);
 
-       size_t list_size = llc_cache->num_colors*sizeof(page_list_t);;
-       page_list_t* lists = (page_list_t*)boot_alloc(list_size, PGSIZE);
-
-       size_t num_colors = llc_cache->num_colors;
-       for (size_t i = 0; i < num_colors; i++)
-               BSD_LIST_INIT(&lists[i]);
-
        uintptr_t first_free_page = ROUNDUP(boot_freemem, PGSIZE);
        uintptr_t first_invalid_page = LA2PPN(boot_freelimit);
        assert(first_invalid_page == max_nr_pages);
@@ -39,11 +28,8 @@ void page_alloc_init(struct multiboot_info *mbi)
        // append other pages to the free lists
        for (uintptr_t page = first_free_page; page < first_invalid_page; page++)
        {
-               BSD_LIST_INSERT_HEAD(&lists[page & (num_colors-1)], &pages[page],
-                                    pg_link);
+               BSD_LIST_INSERT_HEAD(&page_free_list, &pages[page], pg_link);
                pages[page].pg_is_free = TRUE;
        }
        nr_free_pages = first_invalid_page - first_free_page;
-
-       colored_page_free_list = lists;
 }
diff --git a/kern/arch/x86/Kbuild b/kern/arch/x86/Kbuild
index 142531c..318bdaa 100644 (file)
@@ -1,6 +1,5 @@
 obj-y                                          += apic.o
 obj-y                                          += apic9.o
-obj-y                                          += colored_caches.o
 obj-y                                          += console.o
 obj-y                                          += cpuinfo.o
 obj-y                                          += devarch.o
diff --git a/kern/arch/x86/colored_caches.c b/kern/arch/x86/colored_caches.c
deleted file mode 100644 (file)
index 11c4259..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- *
- * Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#include <error.h>
-#include <bitmask.h>
-#include <colored_caches.h>
-#include <process.h>
-
-// Static global variable of caches to assign to the available caches struct
-static cache_t l1,l2,l3;
-
-// Convenient global variable for accessing the last level cache
-cache_t* llc_cache;
-
-// Global variables
-available_caches_t available_caches;
-
-/************** Cache Related Functions  *****************/
-void cache_init()
-{
-       // Initialize the caches available on this system.
-       // TODO: Should call out to something reading the acpi tables from
-       // memory, or something similar.  For now, just initialize them inline
-       available_caches.l1 = &l1;
-       available_caches.l2 = &l2;
-       available_caches.l3 = &l3;
-       llc_cache = &l3;
-#ifdef CONFIG_BOXBORO
-       /* level (ignoring L1I), size, ways, CL size) */
-       init_cache_properties(&l1,   32,  8, 64);       /* 1 color */
-       init_cache_properties(&l2,  256,  8, 64);       /* 16 colors */
-       init_cache_properties(&l3, 24576, 24, 64);      /* 256 colors */
-#else /* Core i7 */
-       init_cache_properties(&l1,   32,  8, 64);       /* 1 color */
-       init_cache_properties(&l2,  256,  8, 64);       /* 16 colors */
-       init_cache_properties(&l3, 8192, 16, 64);       /* 128 colors */
-#endif /* CONFIG_E1000_ON_BOXBORO */
-       printk("Cache init successful\n");
-}
-
-void cache_color_alloc_init()
-{
-       init_free_cache_colors_map(&l1);
-       init_free_cache_colors_map(&l2);
-       init_free_cache_colors_map(&l3);
-}
-
diff --git a/kern/arch/x86/colored_page_alloc.h b/kern/arch/x86/colored_page_alloc.h
deleted file mode 100644 (file)
index 0b629f1..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- */
-/**
- * @author Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#pragma once
-
-/********** Page Coloring Related Macros ************/
-// Define these to make sure that each level of the cache
-// is initialized and managed properly
-#define DECLARE_CACHE_COLORED_PAGE_LINKS()                    \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l1)                       \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l2)                       \
-       DECLARE_CACHE_COLORED_PAGE_LINK(l3)
-
-#define DECLARE_CACHE_COLORED_PAGE_FREE_LISTS()               \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l1)                  \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l2)                  \
-       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LISTS()        \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l1)           \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l2)           \
-       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTIONS()          \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l1)             \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l2)             \
-       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l3)
-
-#define INIT_CACHE_COLORED_PAGE_FREE_LISTS()                  \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l1)                     \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l2)                     \
-       INIT_CACHE_COLORED_PAGE_FREE_LIST(l3)
-
-#define REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page)      \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l1)       \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l2)       \
-       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l3)
-
-#define INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS(page)      \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l1)       \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l2)       \
-       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l3)
diff --git a/kern/arch/x86/page_alloc.c b/kern/arch/x86/page_alloc.c
index 747cb15..6e2c939 100644 (file)
 #include <kmalloc.h>
 #include <multiboot.h>
 
-spinlock_t colored_page_free_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
-
-page_list_t *colored_page_free_list = NULL;
-
-static void page_alloc_bootstrap() {
-       // Allocate space for the array required to manage the free lists
-       size_t list_size = llc_cache->num_colors*sizeof(page_list_t);
-       page_list_t *tmp = (page_list_t*)boot_alloc(list_size,PGSIZE);
-       colored_page_free_list = tmp;
-       for (int i = 0; i < llc_cache->num_colors; i++)
-               BSD_LIST_INIT(&colored_page_free_list[i]);
-}
-
 /* Can do whatever here.  For now, our page allocator just works with colors,
  * not NUMA zones or anything. */
 static void track_free_page(struct page *page)
 {
-       BSD_LIST_INSERT_HEAD(&colored_page_free_list[get_page_color(page2ppn(page),
-                                                                   llc_cache)],
-                        page, pg_link);
+       BSD_LIST_INSERT_HEAD(&page_free_list, page, pg_link);
        nr_free_pages++;
        page->pg_is_free = TRUE;
 }
@@ -206,7 +191,6 @@ static void account_for_pages(physaddr_t boot_freemem_paddr)
 /* Initialize the memory free lists.  After this, do not use boot_alloc. */
 void page_alloc_init(struct multiboot_info *mbi)
 {
-       page_alloc_bootstrap();
        /* First, all memory is busy / not free by default.
         *
         * To avoid a variety of headaches, any memory below 1MB is considered busy.
diff --git a/kern/include/colored_caches.h b/kern/include/colored_caches.h
deleted file mode 100644 (file)
index 45d05a8..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- *
- * Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#pragma once
-
-#include <ros/common.h>
-#include <error.h>
-#include <atomic.h>
-
-/****************** Cache Structures ********************/
-typedef struct Cache {
-       size_t wa;
-       size_t sz_k;
-       size_t clsz;
-       uint8_t* free_colors_map;
-
-       //Added as optimization (derived from above);
-       size_t num_colors;
-} cache_t;
-
-typedef struct AvailableCaches {
-       cache_t* l1;
-       cache_t* l2;
-       cache_t* l3;
-} available_caches_t;
-
-/******** Externally visible global variables ************/
-extern available_caches_t available_caches;
-extern cache_t* llc_cache;
-extern spinlock_t cache_colors_lock;
-
-/************** Cache Related Functions  *****************/
-void cache_init();
-void cache_color_alloc_init();
-void init_cache_properties(cache_t *c, size_t sz_k, size_t wa, size_t clsz);
-void init_free_cache_colors_map(cache_t* c);
-size_t get_offset_in_cache_line(uintptr_t addr, cache_t *c);
-void print_cache_properties(char *lstring, cache_t *c);
-
-static inline size_t get_page_color(uintptr_t page, cache_t *c) {
-    return (page & (c->num_colors-1));
-}
-
-
-uint8_t* cache_colors_map_alloc();
-void cache_colors_map_free(uint8_t* colors_map);
-error_t cache_color_alloc(cache_t* c, uint8_t* colors_map);
-error_t cache_color_alloc_specific(size_t color, cache_t* c,
-                                         uint8_t* colors_map);
-void cache_color_free(cache_t* c, uint8_t* colors_map);
-void cache_color_free_specific(size_t color, cache_t* c, uint8_t* colors_map);
-
-/****************** Cache Properties *********************/
-inline size_t get_cache_ways_associative(cache_t *c);
-inline size_t get_cache_line_size_bytes(cache_t *c);
-inline size_t get_cache_size_bytes(cache_t *c);
-inline size_t get_cache_size_kilobytes(cache_t *c);
-inline size_t get_cache_size_megabytes(cache_t *c);
-inline size_t get_cache_num_offset_bits(cache_t *c);
-inline size_t get_cache_num_index_bits(cache_t *c);
-inline size_t get_cache_num_tag_bits(cache_t *c);
-inline size_t get_cache_num_page_color_bits(cache_t *c);
-inline size_t get_cache_bytes_per_line(cache_t *c);
-inline size_t get_cache_num_lines(cache_t *c);
-inline size_t get_cache_num_sets(cache_t *c);
-inline size_t get_cache_lines_per_set(cache_t *c);
-inline size_t get_cache_lines_per_page(cache_t *c);
-inline size_t get_cache_bytes_per_way(cache_t *c);
-inline size_t get_cache_lines_per_way(cache_t *c);
-inline size_t get_cache_pages_per_way(cache_t *c);
-inline size_t get_cache_num_page_colors(cache_t *c);
diff --git a/kern/include/colored_page_alloc.h b/kern/include/colored_page_alloc.h
deleted file mode 100644 (file)
index dce91af..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- */
-/**
- * @author Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#pragma once
-
-#include <colored_caches.h>
-#include <arch/colored_page_alloc.h>
-#include <stdio.h>
-
-#define DECLARE_CACHE_COLORED_PAGE_LINK(_cache)                               \
-       page_list_entry_t _cache##_cache_colored_pg_link;
-
-#define DECLARE_CACHE_COLORED_PAGE_FREE_LIST(_cache)                          \
-       uint8_t _cache##_num_colors = 0;                                          \
-       page_list_t *_cache##_cache_colored_page_list = NULL;
-
-#define DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(_cache)                   \
-    extern uint8_t _cache##_num_colors;                                       \
-       extern page_list_t *_cache##_cache_colored_page_list;
-
-#define DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(_cache)                     \
-error_t _cache##_page_alloc(page_t** page, size_t color)                      \
-{                                                                             \
-       /*      TODO: Put a lock around this */                                       \
-       if(available_caches._cache && !LIST_EMPTY(&(_cache##_cache_colored_page_list)[(color)])) {           \
-               *(page) = LIST_FIRST(&(_cache##_cache_colored_page_list)[(color)]);   \
-               LIST_REMOVE(*page, global_link);                                      \
-               REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page);                     \
-               page_clear(*page);                                                    \
-               return ESUCCESS;                                                      \
-       }                                                                         \
-       return -ENOMEM;                                                           \
-}
-
-#define INIT_CACHE_COLORED_PAGE_FREE_LIST(_cache)                             \
-{                                                                             \
-       if(available_caches._cache == TRUE) {                                     \
-           _cache##_num_colors = get_cache_num_page_colors(&(_cache));           \
-           assert(((_cache##_num_colors) & ((_cache##_num_colors)-1)) == 0);     \
-           size_t list_size = _cache##_num_colors*sizeof(page_list_t);           \
-           _cache##_cache_colored_page_list                                      \
-              = (page_list_t*) boot_alloc(list_size, PGSIZE);                    \
-               for(int i=0; i<_cache##_num_colors; i++) {                            \
-                       LIST_INIT(&(_cache##_cache_colored_page_list[i]));                \
-               }                                                                     \
-       }                                                                         \
-}
-
-#define REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(_page, _cache)              \
-       if(available_caches._cache == TRUE)                                       \
-               LIST_REMOVE(*(_page), _cache##_cache_colored_pg_link);
-
-
-#define INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(_page, _cache)              \
-       if(available_caches._cache == TRUE) {                                     \
-               LIST_INSERT_HEAD(                                                     \
-                  &(_cache##_cache_colored_page_list                                 \
-                        [get_page_color(page2ppn((_page)), &(_cache))]),             \
-                  (_page),                                                           \
-                  _cache##_cache_colored_pg_link                                   \
-               );                                                                    \
-       }
diff --git a/kern/include/env.h b/kern/include/env.h
index e1270ed..1a33808 100644 (file)
@@ -64,11 +64,6 @@ struct proc {
        /* Scheduler mgmt (info, data, whatever) */
        struct sched_proc_data ksched_data;
 
-       /* Cache color map: bitmap of the cache colors currently allocated to this
-        * process */
-       uint8_t* cache_colors_map;
-       size_t next_cache_color;
-
        /* The args_base pointer is a user pointer which points to the base of
         * the executable boot block (where args, environment, aux vectors, ...)
         * are stored.
diff --git a/kern/include/linux/compat_todo.h b/kern/include/linux/compat_todo.h
index 2853b89..d1a107b 100644 (file)
@@ -838,7 +838,9 @@ static inline void pci_unmap_sg(struct pci_device *pdev, struct scatterlist *sg,
        dma_unmap_sg(NULL, sg, nents, (enum dma_data_direction)direction);
 }
 
-#define cache_line_size() get_cache_line_size_bytes(available_caches.l1)
+/* TODO: get this in an arch-dependent manner.  On x86, be careful of adjacent
+ * cacheline prefetching. */
+#define cache_line_size() 64
 
 static inline void *lowmem_page_address(struct page *page)
 {
diff --git a/kern/include/page_alloc.h b/kern/include/page_alloc.h
index 65e8060..5a6b602 100644 (file)
@@ -11,7 +11,6 @@
 #include <sys/queue.h>
 #include <error.h>
 #include <arch/mmu.h>
-#include <colored_page_alloc.h>
 #include <process.h>
 #include <kref.h>
 #include <kthread.h>
@@ -53,20 +52,16 @@ struct page {
 };
 
 /******** Externally visible global variables ************/
-extern uint8_t* global_cache_colors_map;
-extern spinlock_t colored_page_free_list_lock;
-extern page_list_t *colored_page_free_list;
+extern spinlock_t page_list_lock;
+extern page_list_t page_free_list;
 
 /*************** Functional Interface *******************/
 void page_alloc_init(struct multiboot_info *mbi);
-void colored_page_alloc_init(void);
 
-error_t upage_alloc(struct proc* p, page_t **page, int zero);
+error_t upage_alloc(struct proc *p, page_t **page, bool zero);
 error_t kpage_alloc(page_t **page);
 void *kpage_alloc_addr(void);
 void *kpage_zalloc_addr(void);
-error_t upage_alloc_specific(struct proc* p, page_t **page, size_t ppn);
-error_t kpage_alloc_specific(page_t **page, size_t ppn);
 
 void *get_cont_pages(size_t order, int flags);
 void *get_cont_pages_node(int node, size_t order, int flags);
diff --git a/kern/src/Kbuild b/kern/src/Kbuild
index 07b6f24..71a3265 100644 (file)
@@ -27,7 +27,6 @@ obj-y                                         += bitmap.o
 obj-y                                          += blockdev.o
 obj-y                                          += build_info.o
 obj-y                                          += ceq.o
-obj-y                                          += colored_caches.o
 obj-y                                          += completion.o
 obj-y                                          += coreprov.o
 obj-y                                          += ctype.o
diff --git a/kern/src/colored_caches.c b/kern/src/colored_caches.c
deleted file mode 100644 (file)
index 22b650f..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-/* Copyright (c) 2009 The Regents of the University  of California.
- * See the COPYRIGHT files at the top of this source tree for full
- * license information.
- *
- * Kevin Klues <klueska@cs.berkeley.edu>
- */
-
-#include <ros/common.h>
-#include <arch/mmu.h>
-#include <bitmask.h>
-#include <colored_caches.h>
-#include <stdio.h>
-#include <atomic.h>
-#include <kmalloc.h>
-#include <page_alloc.h>
-
-#define l1 (available_caches.l1)
-#define l2 (available_caches.l2)
-#define l3 (available_caches.l3)
-
-spinlock_t cache_colors_lock = SPINLOCK_INITIALIZER_IRQSAVE;
-
-/************** Cache Related Functions  *****************/
-inline void init_cache_properties(cache_t *c, size_t sz_k, size_t wa, size_t clsz) {
-       c->wa = wa;
-       c->sz_k = sz_k;
-       c->clsz = clsz;
-
-#ifdef CONFIG_PAGE_COLORING
-       //Added as optimization (derived from above);
-       size_t nc = get_cache_num_page_colors(c);
-       c->num_colors = nc;
-#else
-       c->num_colors = 1;
-#endif
-}
-
-inline void init_free_cache_colors_map(cache_t* c)
-{
-       // Initialize the free colors map
-       c->free_colors_map = kmalloc(c->num_colors, 0);
-       FILL_BITMASK(c->free_colors_map, c->num_colors);
-}
-
-inline size_t get_offset_in_cache_line(uintptr_t addr, cache_t *c) {
-    return (addr % get_cache_bytes_per_line(c));
-}
-
-void print_cache_properties(char *lstring, cache_t *c)
-{
-       printk("%s_WAYS_ASSOCIATIVE: %ld\n", lstring, get_cache_ways_associative(c));
-       printk("%s_LINE_SIZE_BYTES: %ld\n", lstring, get_cache_line_size_bytes(c));
-       printk("%s_SIZE_BYTES: %ld\n", lstring, get_cache_size_bytes(c));
-       printk("%s_SIZE_KILOBYTES: %ld\n", lstring, get_cache_size_kilobytes(c));
-       printk("%s_SIZE_MEGABYTES: %ld\n", lstring, get_cache_size_megabytes(c));
-       printk("%s_OFFSET_BITS: %ld\n", lstring, get_cache_num_offset_bits(c));
-       printk("%s_INDEX_BITS: %ld\n", lstring, get_cache_num_index_bits(c));
-       printk("%s_TAG_BITS: %ld\n", lstring, get_cache_num_tag_bits(c));
-       printk("%s_PAGE_COLOR_BITS: %ld\n", lstring, get_cache_num_page_color_bits(c));
-       printk("%s_BYTES_PER_LINE: %ld\n", lstring, get_cache_bytes_per_line(c));
-       printk("%s_NUM_LINES: %ld\n", lstring, get_cache_num_lines(c));
-       printk("%s_NUM_SETS: %ld\n", lstring, get_cache_num_sets(c));
-       printk("%s_LINES_PER_SET: %ld\n", lstring, get_cache_lines_per_set(c));
-       printk("%s_LINES_PER_PAGE: %ld\n", lstring, get_cache_lines_per_page(c));
-       printk("%s_BYTES_PER_WAY: %ld\n", lstring, get_cache_bytes_per_way(c));
-       printk("%s_LINES_PER_WAY: %ld\n", lstring, get_cache_lines_per_way(c));
-       printk("%s_PAGES_PER_WAY: %ld\n", lstring, get_cache_pages_per_way(c));
-       printk("%s_NUM_PAGE_COLORS: %ld\n", lstring, get_cache_num_page_colors(c));
-}
-
-/****************** Cache Properties *********************/
-inline size_t get_cache_ways_associative(cache_t *c) {
-       return (c->wa);
-}
-inline size_t get_cache_line_size_bytes(cache_t *c) {
-       return (c->clsz);
-}
-inline size_t get_cache_size_bytes(cache_t *c) {
-       return (c->sz_k * ONE_KILOBYTE);
-}
-inline size_t get_cache_size_kilobytes(cache_t *c) {
-       return (c->sz_k);
-}
-inline size_t get_cache_size_megabytes(cache_t *c) {
-       return (c->sz_k / ONE_KILOBYTE);
-}
-inline size_t get_cache_num_offset_bits(cache_t *c) {
-       return (LOG2_UP(get_cache_line_size_bytes(c)));
-}
-inline size_t get_cache_num_index_bits(cache_t *c) {
-       return (LOG2_UP(get_cache_size_bytes(c)
-                   / get_cache_ways_associative(c)
-                   / get_cache_line_size_bytes(c)));
-}
-inline size_t get_cache_num_tag_bits(cache_t *c) {
-       return (NUM_ADDR_BITS - get_cache_num_offset_bits(c)
-                          - get_cache_num_index_bits(c));
-}
-inline size_t get_cache_num_page_color_bits(cache_t *c) {
-       return (get_cache_num_offset_bits(c)
-                  + get_cache_num_index_bits(c)
-                  - PGSHIFT);
-}
-inline size_t get_cache_bytes_per_line(cache_t *c) {
-       return (1 << get_cache_num_offset_bits(c));
-}
-inline size_t get_cache_num_lines(cache_t *c) {
-       return (get_cache_size_bytes(c)/get_cache_bytes_per_line(c));
-}
-inline size_t get_cache_num_sets(cache_t *c) {
-       return (get_cache_num_lines(c)/get_cache_ways_associative(c));
-}
-inline size_t get_cache_lines_per_set(cache_t *c) {
-       return (get_cache_ways_associative(c));
-}
-inline size_t get_cache_lines_per_page(cache_t *c) {
-       return (PGSIZE / get_cache_bytes_per_line(c));
-}
-inline size_t get_cache_bytes_per_way(cache_t *c) {
-       return (get_cache_size_bytes(c)/get_cache_ways_associative(c));
-}
-inline size_t get_cache_lines_per_way(cache_t *c) {
-       return (get_cache_num_lines(c)/get_cache_ways_associative(c));
-}
-inline size_t get_cache_pages_per_way(cache_t *c) {
-       return (get_cache_lines_per_way(c)/get_cache_lines_per_page(c));
-}
-inline size_t get_cache_num_page_colors(cache_t *c) {
-       return get_cache_pages_per_way(c);
-}
-
-static inline void set_color_range(uint16_t color, uint8_t* map,
-                                   cache_t* smaller, cache_t* bigger)
-{
-       size_t base, r;
-       if(smaller->num_colors <= bigger->num_colors) {
-               r = bigger->num_colors / smaller->num_colors;
-               base = color*r;
-               SET_BITMASK_RANGE(map, base, base+r);
-       }
-       else {
-               r = smaller->num_colors / bigger->num_colors;
-               base = color/r;
-               if(BITMASK_IS_SET_IN_RANGE(smaller->free_colors_map,
-                                          base*r, base*r+r-1))
-                       SET_BITMASK_BIT(map, base);
-       }
-}
-
-static inline void clr_color_range(uint16_t color, uint8_t* map,
-                                   cache_t* smaller, cache_t* bigger)
-{
-       size_t base, r;
-       if(smaller->num_colors <= bigger->num_colors) {
-               r = bigger->num_colors / smaller->num_colors;
-               base = color*r;
-               CLR_BITMASK_RANGE(map, base, base+r);
-       }
-       else {
-               r = smaller->num_colors / bigger->num_colors;
-               base = color/r;
-               CLR_BITMASK_BIT(map, base);
-       }
-}
-
-static inline error_t __cache_color_alloc_specific(size_t color, cache_t* c,
-                                                         uint8_t* colors_map)
-{
-       if(!GET_BITMASK_BIT(c->free_colors_map, color))
-               return -ENOCACHE;
-
-       if(l1)
-               clr_color_range(color, l1->free_colors_map, c, l1);
-       if(l2)
-               clr_color_range(color, l2->free_colors_map, c, l2);
-       if(l3)
-               clr_color_range(color, l3->free_colors_map, c, l3);
-
-       set_color_range(color, colors_map, c, llc_cache);
-       return ESUCCESS;
-}
-
-static inline error_t __cache_color_alloc(cache_t* c, uint8_t* colors_map)
-{
-       if(BITMASK_IS_CLEAR(c->free_colors_map, c->num_colors))
-               return -ENOCACHE;
-
-       int color=0;
-       do {
-               if(GET_BITMASK_BIT(c->free_colors_map, color))
-                       break;
-       } while(++color);
-
-       return __cache_color_alloc_specific(color, c, colors_map);
-}
-
-static inline void __cache_color_free_specific(size_t color, cache_t* c,
-                                                     uint8_t* colors_map)
-{
-       if(GET_BITMASK_BIT(c->free_colors_map, color))
-               return;
-       else {
-               size_t r = llc_cache->num_colors / c->num_colors;
-               size_t base = color*r;
-               if(!BITMASK_IS_SET_IN_RANGE(colors_map, base, base+r))
-                       return;
-       }
-
-       if(l3)
-               set_color_range(color, l3->free_colors_map, c, l3);
-       if(l2)
-               set_color_range(color, l2->free_colors_map, c, l2);
-       if(l1)
-               set_color_range(color, l1->free_colors_map, c, l1);
-
-       clr_color_range(color, colors_map, c, llc_cache);
-}
-
-static inline void __cache_color_free(cache_t* c, uint8_t* colors_map)
-{
-       if(BITMASK_IS_FULL(c->free_colors_map, c->num_colors))
-               return;
-
-       int color=0;
-       do {
-               if(!GET_BITMASK_BIT(c->free_colors_map, color)) {
-                       size_t r = llc_cache->num_colors / c->num_colors;
-                       size_t base = color*r;
-                       if(BITMASK_IS_SET_IN_RANGE(colors_map, base, base+r))
-                               break;
-               }
-       } while(++color < c->num_colors);
-       if(color == c->num_colors)
-               return;
-
-       __cache_color_free_specific(color, c, colors_map);
-}
-
-uint8_t* cache_colors_map_alloc() {
-#ifdef CONFIG_PAGE_COLORING
-       uint8_t* colors_map = kmalloc(llc_cache->num_colors, 0);
-       if(colors_map)
-               CLR_BITMASK(colors_map, llc_cache->num_colors);
-       return colors_map;
-#else
-       return global_cache_colors_map;
-#endif
-}
-
-void cache_colors_map_free(uint8_t* colors_map) {
-#ifdef CONFIG_PAGE_COLORING
-       kfree(colors_map);
-#endif
-}
-
-error_t cache_color_alloc(cache_t* c, uint8_t* colors_map)
-{
-       spin_lock_irqsave(&cache_colors_lock);
-       error_t e = __cache_color_alloc(c, colors_map);
-       spin_unlock_irqsave(&cache_colors_lock);
-       return e;
-}
-error_t cache_color_alloc_specific(size_t color, cache_t* c, uint8_t* colors_map)
-{
-       spin_lock_irqsave(&cache_colors_lock);
-       error_t e = __cache_color_alloc_specific(color, c, colors_map);
-       spin_unlock_irqsave(&cache_colors_lock);
-       return e;
-}
-
-void cache_color_free(cache_t* c, uint8_t* colors_map)
-{
-       spin_lock_irqsave(&cache_colors_lock);
-       __cache_color_free(c, colors_map);
-       spin_unlock_irqsave(&cache_colors_lock);
-}
-void cache_color_free_specific(size_t color, cache_t* c, uint8_t* colors_map)
-{
-       spin_lock_irqsave(&cache_colors_lock);
-       __cache_color_free_specific(color, c, colors_map);
-       spin_unlock_irqsave(&cache_colors_lock);
-}
-
diff --git a/kern/src/init.c b/kern/src/init.c
index 4057e94..22cbf90 100644 (file)
@@ -137,14 +137,11 @@ void kernel_init(multiboot_info_t *mboot_info)
        printk("Boot Command Line: '%s'\n", boot_cmdline);
 
        exception_table_init();
-       cache_init();                                   // Determine systems's cache properties
        pmem_init(multiboot_kaddr);
        kmem_cache_init();              // Sets up slab allocator
        kmalloc_init();
        hashtable_init();
        radix_init();
-       cache_color_alloc_init();       // Inits data structs
-       colored_page_alloc_init();      // Allocates colors for agnostic processes
        acpiinit();
        topology_init();
        percpu_init();
diff --git a/kern/src/ktest/pb_ktests.c b/kern/src/ktest/pb_ktests.c
index 9008f37..35146c3 100644 (file)
 
 KTEST_SUITE("POSTBOOT")
 
-#define l1 (available_caches.l1)
-#define l2 (available_caches.l2)
-#define l3 (available_caches.l3)
-
 #ifdef CONFIG_X86
 
 // TODO: Do test if possible inside this function, and add assertions.
@@ -121,136 +117,6 @@ bool test_pic_reception(void)
 
 #endif // CONFIG_X86
 
-// TODO: Add assertions. Possibly the way to go is to extract relevant info
-//       from cache properties and make assertions on the colored pages lists
-//       based on those.
-// TODO: The test was commented out. Figure out why was it like that and fix it.
-bool test_page_coloring(void)
-{
-       /*
-       //Print the different cache properties of our machine
-       print_cache_properties("L1", l1);
-       cprintf("\n");
-       print_cache_properties("L2", l2);
-       cprintf("\n");
-       print_cache_properties("L3", l3);
-       cprintf("\n");
-
-       //Print some stats about our memory
-       cprintf("Max Address: %llu\n", MAX_VADDR);
-       cprintf("Num Pages: %u\n", npages);
-
-       //Declare a local variable for allocating pages
-       page_t* page;
-
-       cprintf("Contents of the page free list:\n");
-       for(int i=0; i<llc_cache->num_colors; i++) {
-               cprintf("  COLOR %d:\n", i);
-               LIST_FOREACH(page, &colored_page_free_list[i], pg_link) {
-                       cprintf("    Page: %d\n", page2ppn(page));
-               }
-       }
-
-       //Run through and allocate all pages through l1_page_alloc
-       cprintf("Allocating from L1 page colors:\n");
-       for(int i=0; i<get_cache_num_page_colors(l1); i++) {
-               cprintf("  COLOR %d:\n", i);
-               while(colored_page_alloc(l1, &page, i) != -ENOMEM)
-                       cprintf("    Page: %d\n", page2ppn(page));
-       }
-
-       //Put all the pages back by reinitializing
-       page_init();
-
-       //Run through and allocate all pages through l2_page_alloc
-       cprintf("Allocating from L2 page colors:\n");
-       for(int i=0; i<get_cache_num_page_colors(l2); i++) {
-               cprintf("  COLOR %d:\n", i);
-               while(colored_page_alloc(l2, &page, i) != -ENOMEM)
-                       cprintf("    Page: %d\n", page2ppn(page));
-       }
-
-       //Put all the pages back by reinitializing
-       page_init();
-
-       //Run through and allocate all pages through l3_page_alloc
-       cprintf("Allocating from L3 page colors:\n");
-       for(int i=0; i<get_cache_num_page_colors(l3); i++) {
-               cprintf("  COLOR %d:\n", i);
-               while(colored_page_alloc(l3, &page, i) != -ENOMEM)
-                       cprintf("    Page: %d\n", page2ppn(page));
-       }
-
-       //Put all the pages back by reinitializing
-       page_init();
-
-       //Run through and allocate all pages through page_alloc
-       cprintf("Allocating from global allocator:\n");
-       while(upage_alloc(&page) != -ENOMEM)
-               cprintf("    Page: %d\n", page2ppn(page));
-
-       if(colored_page_alloc(l2, &page, 0) != -ENOMEM)
-               cprintf("Should not get here, all pages should already be gone!\n");
-       cprintf("All pages gone for sure...\n");
-
-       //Now lets put a few pages back using page_free..
-       cprintf("Reinserting pages via page_free and reallocating them...\n");
-       page_free(&pages[0]);
-       page_free(&pages[15]);
-       page_free(&pages[7]);
-       page_free(&pages[6]);
-       page_free(&pages[4]);
-
-       while(upage_alloc(&page) != -ENOMEM)
-               cprintf("Page: %d\n", page2ppn(page));
-
-       page_init();
-       */
-       return true;
-}
-
-// TODO: Add assertions.
-bool test_color_alloc(void) {
-       size_t checkpoint = 0;
-       uint8_t* colors_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
-       cache_color_alloc(l2, colors_map);
-       cache_color_alloc(l3, colors_map);
-       cache_color_alloc(l3, colors_map);
-       cache_color_alloc(l2, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(l2, colors_map);
-       cache_color_free(llc_cache, colors_map);
-       cache_color_free(llc_cache, colors_map);
-
-print_cache_colors:
-       printk("L1 free colors, tot colors: %d\n", l1->num_colors);
-       PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
-       printk("L2 free colors, tot colors: %d\n", l2->num_colors);
-       PRINT_BITMASK(l2->free_colors_map, l2->num_colors);
-       printk("L3 free colors, tot colors: %d\n", l3->num_colors);
-       PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
-       printk("Process allocated colors\n");
-       PRINT_BITMASK(colors_map, llc_cache->num_colors);
-       printk("test_color_alloc() complete!\n");
-
-       return true;
-}
-
 barrier_t test_cpu_array;
 
 // TODO: Add assertions, try to do everything from within this same function.
@@ -2398,10 +2264,6 @@ static struct ktest ktests[] = {
        KTEST_REG(circ_buffer,        CONFIG_TEST_circ_buffer),
        KTEST_REG(kernel_messages,    CONFIG_TEST_kernel_messages),
 #endif // CONFIG_X86
-#ifdef CONFIG_PAGE_COLORING
-       KTEST_REG(page_coloring,      CONFIG_TEST_page_coloring),
-       KTEST_REG(color_alloc,        CONFIG_TEST_color_alloc),
-#endif // CONFIG_PAGE_COLORING
        KTEST_REG(barrier,            CONFIG_TEST_barrier),
        KTEST_REG(interrupts_irqsave, CONFIG_TEST_interrupts_irqsave),
        KTEST_REG(bitmasks,           CONFIG_TEST_bitmasks),
diff --git a/kern/src/manager.c b/kern/src/manager.c
index 677e5ef..9004b21 100644 (file)
@@ -23,7 +23,6 @@
 #include <stdio.h>
 #include <time.h>
 #include <monitor.h>
-#include <colored_caches.h>
 #include <string.h>
 #include <pmap.h>
 #include <arch/console.h>
diff --git a/kern/src/monitor.c b/kern/src/monitor.c
index 9d78646..dfc7df9 100644 (file)
@@ -1245,7 +1245,7 @@ usage:
 int mon_gfp(int argc, char **argv, struct hw_trapframe *hw_tf)
 {
        size_t naddrpages = max_paddr / PGSIZE;
-       spin_lock_irqsave(&colored_page_free_list_lock);
+       spin_lock_irqsave(&page_list_lock);
        printk("%9s %9s %9s\n", "start", "end", "size");
        for (int i = 0; i < naddrpages; i++) {
                int j;
@@ -1258,7 +1258,7 @@ int mon_gfp(int argc, char **argv, struct hw_trapframe *hw_tf)
                        i = j;
                }
        }
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       spin_unlock_irqsave(&page_list_lock);
        return 0;
 }
 
diff --git a/kern/src/page_alloc.c b/kern/src/page_alloc.c
index 0c6ffb2..49ec9db 100644 (file)
 #include <kmalloc.h>
 #include <blockdev.h>
 
-#define l1 (available_caches.l1)
-#define l2 (available_caches.l2)
-#define l3 (available_caches.l3)
+spinlock_t page_list_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+
+page_list_t page_free_list = BSD_LIST_HEAD_INITIALIZER(page_free_list);
 
 static void __page_decref(page_t *page);
 static error_t __page_alloc_specific(page_t **page, size_t ppn);
 
-#ifdef CONFIG_PAGE_COLORING
-#define NUM_KERNEL_COLORS 8
-#else
-#define NUM_KERNEL_COLORS 1
-#endif
-
-
-// Global list of colors allocated to the general purpose memory allocator
-uint8_t* global_cache_colors_map;
-size_t global_next_color = 0;
-
-void colored_page_alloc_init()
-{
-       global_cache_colors_map =
-              kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
-       CLR_BITMASK(global_cache_colors_map, llc_cache->num_colors);
-       for(int i = 0; i < llc_cache->num_colors/NUM_KERNEL_COLORS; i++)
-               cache_color_alloc(llc_cache, global_cache_colors_map);
-}
-
 /* Initializes a page.  We can optimize this a bit since 0 usually works to init
  * most structures, but we'll hold off on that til it is a problem. */
 static void __page_init(struct page *page)
@@ -51,49 +31,6 @@ static void __page_init(struct page *page)
        page->pg_is_free = FALSE;
 }
 
-#define __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, predicate) \
-       /* Find first available color with pages available */                   \
-    /* in the given range */                                                \
-       int i = base_color;                                                     \
-       for (i; i < (base_color+range); i++) {                                  \
-               if((predicate))                                                     \
-                       break;                                                          \
-       }                                                                       \
-       /* Allocate a page from that color */                                   \
-       if(i < (base_color+range)) {                                            \
-               *page = BSD_LIST_FIRST(&colored_page_free_list[i]);                 \
-               BSD_LIST_REMOVE(*page, pg_link);                                    \
-               __page_init(*page);                                                 \
-               return i;                                                           \
-       }                                                                       \
-       return -ENOMEM;
-
-static ssize_t __page_alloc_from_color_range(page_t** page,
-                                           uint16_t base_color,
-                                           uint16_t range)
-{
-       __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
-                        !BSD_LIST_EMPTY(&colored_page_free_list[i]));
-}
-
-static ssize_t __page_alloc_from_color_map_range(page_t** page, uint8_t* map,
-                                              size_t base_color, size_t range)
-{
-       __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
-                   GET_BITMASK_BIT(map, i) &&
-                       !BSD_LIST_EMPTY(&colored_page_free_list[i]))
-}
-
-static ssize_t __colored_page_alloc(uint8_t* map, page_t** page,
-                                               size_t next_color)
-{
-       ssize_t ret;
-       if((ret = __page_alloc_from_color_map_range(page, map,
-                                  next_color, llc_cache->num_colors - next_color)) < 0)
-               ret = __page_alloc_from_color_map_range(page, map, 0, next_color);
-       return ret;
-}
-
 static void __real_page_alloc(struct page *page)
 {
        BSD_LIST_REMOVE(page, pg_link);
@@ -111,9 +48,21 @@ static error_t __page_alloc_specific(page_t** page, size_t ppn)
        return 0;
 }
 
+/* Helper, allocates a free page. */
+static struct page *get_a_free_page(void)
+{
+       struct page *ret;
+
+       spin_lock_irqsave(&page_list_lock);
+       ret = BSD_LIST_FIRST(&page_free_list);
+       if (ret)
+               __real_page_alloc(ret);
+       spin_unlock_irqsave(&page_list_lock);
+       return ret;
+}
+
 /**
  * @brief Allocates a physical page from a pool of unused physical memory.
- * Note, the page IS reference counted.
  *
  * Zeroes the page.
  *
@@ -123,48 +72,37 @@ static error_t __page_alloc_specific(page_t** page, size_t ppn)
  * @return ESUCCESS on success
  * @return -ENOMEM  otherwise
  */
-error_t upage_alloc(struct proc* p, page_t** page, int zero)
+error_t upage_alloc(struct proc *p, page_t **page, bool zero)
 {
-       spin_lock_irqsave(&colored_page_free_list_lock);
-       ssize_t ret = __colored_page_alloc(p->cache_colors_map,
-                                            page, p->next_cache_color);
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       struct page *pg = get_a_free_page();
 
-       if (ret >= 0) {
-               if(zero)
-                       memset(page2kva(*page),0,PGSIZE);
-               p->next_cache_color = (ret + 1) & (llc_cache->num_colors-1);
-               return 0;
-       }
-       return ret;
+       if (!pg)
+               return -ENOMEM;
+       *page = pg;
+       if (zero)
+               memset(page2kva(*page), 0, PGSIZE);
+       return 0;
 }
 
-/* Allocates a refcounted page of memory for the kernel's use */
-error_t kpage_alloc(page_t** page)
+error_t kpage_alloc(page_t **page)
 {
-       ssize_t ret;
-       spin_lock_irqsave(&colored_page_free_list_lock);
-       if ((ret = __page_alloc_from_color_range(page, global_next_color,
-                                   llc_cache->num_colors - global_next_color)) < 0)
-               ret = __page_alloc_from_color_range(page, 0, global_next_color);
+       struct page *pg = get_a_free_page();
 
-       if (ret >= 0) {
-               global_next_color = ret;
-               ret = ESUCCESS;
-       }
-       spin_unlock_irqsave(&colored_page_free_list_lock);
-
-       return ret;
+       if (!pg)
+               return -ENOMEM;
+       *page = pg;
+       return 0;
 }
 
 /* Helper: allocates a refcounted page of memory for the kernel's use and
  * returns the kernel address (kernbase), or 0 on error. */
 void *kpage_alloc_addr(void)
 {
-       struct page *a_page;
-       if (kpage_alloc(&a_page))
+       struct page *pg = get_a_free_page();
+
+       if (!pg)
                return 0;
-       return page2kva(a_page);
+       return page2kva(pg);
 }
 
 void *kpage_zalloc_addr(void)
@@ -191,7 +129,7 @@ void *get_cont_pages(size_t order, int flags)
        size_t naddrpages = max_paddr / PGSIZE;
        // Find 'npages' free consecutive pages
        int first = -1;
-       spin_lock_irqsave(&colored_page_free_list_lock);
+       spin_lock_irqsave(&page_list_lock);
        for(int i=(naddrpages-1); i>=(npages-1); i--) {
                int j;
                for(j=i; j>=(i-(npages-1)); j--) {
@@ -211,7 +149,7 @@ void *get_cont_pages(size_t order, int flags)
        }
        //If we couldn't find them, return NULL
        if( first == -1 ) {
-               spin_unlock_irqsave(&colored_page_free_list_lock);
+               spin_unlock_irqsave(&page_list_lock);
                if (flags & MEM_ERROR)
                        error(ENOMEM, ERROR_FIXME);
                return NULL;
@@ -221,7 +159,7 @@ void *get_cont_pages(size_t order, int flags)
                page_t* page;
                __page_alloc_specific(&page, first+i);
        }
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       spin_unlock_irqsave(&page_list_lock);
        return ppn2kva(first);
 }
 
@@ -266,10 +204,10 @@ void *get_cont_phys_pages_at(size_t order, physaddr_t at, int flags)
 
        if (first_pg_nr + nr_pgs > pa2ppn(max_paddr))
                return 0;
-       spin_lock_irqsave(&colored_page_free_list_lock);
+       spin_lock_irqsave(&page_list_lock);
        for (unsigned long i = first_pg_nr; i < first_pg_nr + nr_pgs; i++) {
                if (!page_is_free(i)) {
-                       spin_unlock_irqsave(&colored_page_free_list_lock);
+                       spin_unlock_irqsave(&page_list_lock);
                        if (flags & MEM_ERROR)
                                error(ENOMEM, ERROR_FIXME);
                        return NULL;
@@ -277,52 +215,23 @@ void *get_cont_phys_pages_at(size_t order, physaddr_t at, int flags)
        }
        for (unsigned long i = first_pg_nr; i < first_pg_nr + nr_pgs; i++)
                __real_page_alloc(ppn2page(i));
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       spin_unlock_irqsave(&page_list_lock);
        return KADDR(at);
 }
 
 void free_cont_pages(void *buf, size_t order)
 {
        size_t npages = 1 << order;
-       spin_lock_irqsave(&colored_page_free_list_lock);
+       spin_lock_irqsave(&page_list_lock);
        for (size_t i = kva2ppn(buf); i < kva2ppn(buf) + npages; i++) {
                page_t* page = ppn2page(i);
                __page_decref(ppn2page(i));
                assert(page_is_free(i));
        }
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       spin_unlock_irqsave(&page_list_lock);
        return;
 }
 
-/*
- * Allocates a specific physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
- *
- * ppn         -- the page number to allocate
- * *page       -- is set to point to the Page struct
- *                of the newly allocated page
- *
- * RETURNS
- *   ESUCCESS  -- on success
- *   -ENOMEM   -- otherwise
- */
-error_t upage_alloc_specific(struct proc* p, page_t** page, size_t ppn)
-{
-       spin_lock_irqsave(&colored_page_free_list_lock);
-       __page_alloc_specific(page, ppn);
-       spin_unlock_irqsave(&colored_page_free_list_lock);
-       return 0;
-}
-
-error_t kpage_alloc_specific(page_t** page, size_t ppn)
-{
-       spin_lock_irqsave(&colored_page_free_list_lock);
-       __page_alloc_specific(page, ppn);
-       spin_unlock_irqsave(&colored_page_free_list_lock);
-       return 0;
-}
-
 /* Check if a page with the given physical page # is free. */
 int page_is_free(size_t ppn)
 {
@@ -332,9 +241,9 @@ int page_is_free(size_t ppn)
 /* Frees the page */
 void page_decref(page_t *page)
 {
-       spin_lock_irqsave(&colored_page_free_list_lock);
+       spin_lock_irqsave(&page_list_lock);
        __page_decref(page);
-       spin_unlock_irqsave(&colored_page_free_list_lock);
+       spin_unlock_irqsave(&page_list_lock);
 }
 
 /* Frees the page.  Don't call this without holding the lock already. */
@@ -344,11 +253,7 @@ static void __page_decref(page_t *page)
                free_bhs(page);
        /* Give our page back to the free list.  The protections for this are that
         * the list lock is grabbed by page_decref. */
-       BSD_LIST_INSERT_HEAD(
-          &(colored_page_free_list[get_page_color(page2ppn(page), llc_cache)]),
-          page,
-          pg_link
-       );
+       BSD_LIST_INSERT_HEAD(&page_free_list, page, pg_link);
        page->pg_is_free = TRUE;
 }
 
diff --git a/kern/src/process.c b/kern/src/process.c
index 47840d7..221ab93 100644 (file)
@@ -342,9 +342,6 @@ error_t proc_alloc(struct proc **pp, struct proc *parent, int flags)
        /* only one ref, which we pass back.  the old 'existence' ref is managed by
         * the ksched */
        kref_init(&p->p_kref, __proc_free, 1);
-       // Setup the default map of where to get cache colors from
-       p->cache_colors_map = global_cache_colors_map;
-       p->next_cache_color = 0;
        /* Initialize the address space */
        if ((r = env_setup_vm(p)) < 0) {
                kmem_cache_free(proc_cache, p);
@@ -499,12 +496,6 @@ static void __proc_free(struct kref *kref)
        /* now we'll finally decref files for the file-backed vmrs */
        unmap_and_destroy_vmrs(p);
        frontend_proc_free(p);  /* TODO: please remove me one day */
-       /* Free any colors allocated to this process */
-       if (p->cache_colors_map != global_cache_colors_map) {
-               for(int i = 0; i < llc_cache->num_colors; i++)
-                       cache_color_free(llc_cache, p->cache_colors_map);
-               cache_colors_map_free(p->cache_colors_map);
-       }
        /* Remove us from the pid_hash and give our PID back (in that order). */
        spin_lock(&pid_hash_lock);
        hash_ret = hashtable_remove(pid_hash, (void*)(long)p->pid);
diff --git a/kern/src/syscall.c b/kern/src/syscall.c
index e75c1b2..af1ebeb 100644 (file)
@@ -24,7 +24,6 @@
 #include <profiler.h>
 #include <stdio.h>
 #include <frontend.h>
-#include <colored_caches.h>
 #include <hashtable.h>
 #include <bitmask.h>
 #include <vfs.h>
@@ -740,11 +739,6 @@ static ssize_t sys_fork(env_t* e)
        }
        copy_current_ctx_to(&env->scp_ctx);
 
-       env->cache_colors_map = cache_colors_map_alloc();
-       for (int i = 0; i < llc_cache->num_colors; i++)
-               if (GET_BITMASK_BIT(e->cache_colors_map,i))
-                       cache_color_alloc(llc_cache, env->cache_colors_map);
-
        /* Make the new process have the same VMRs as the older.  This will copy the
         * contents of non MAP_SHARED pages to the new VMRs. */
        if (duplicate_vmrs(e, env)) {