slab: Remove obj_size from struct kmem_slab
diff --git a/kern/src/slab.c b/kern/src/slab.c
index ee8288d..a26cd15 100644
--- a/kern/src/slab.c
+++ b/kern/src/slab.c
 #include <hash.h>
 #include <arena.h>
 
-struct kmem_cache_list kmem_caches;
-spinlock_t kmem_caches_lock;
+struct kmem_cache_list kmem_caches = SLIST_HEAD_INITIALIZER(kmem_caches);
+spinlock_t kmem_caches_lock = SPINLOCK_INITIALIZER_IRQSAVE;
 
 /* Backend/internal functions, defined later.  Grab the lock before calling
  * these. */
 static bool kmem_cache_grow(struct kmem_cache *cp);
+static void *__kmem_alloc_from_slab(struct kmem_cache *cp, int flags);
+static void __kmem_free_to_slab(struct kmem_cache *cp, void *buf);
 
 /* Cache of the kmem_cache objects, needed for bootstrapping */
 struct kmem_cache kmem_cache_cache[1];
@@ -42,11 +44,11 @@ void __kmem_cache_create(struct kmem_cache *kc, const char *name,
        assert(align);
        spinlock_init_irqsave(&kc->cache_lock);
        strlcpy(kc->name, name, KMC_NAME_SZ);
-       kc->obj_size = obj_size;
-       /* TODO: when we are used from a qcache, we'll have a flag that tells us how
-        * to set this to interact with the arena nicely. */
-       kc->import_amt = ROUNDUP(NUM_BUF_PER_SLAB * ROUNDUP(obj_size, align),
-                                PGSIZE);
+       kc->obj_size = ROUNDUP(obj_size, align);
+       if (flags & KMC_QCACHE)
+               kc->import_amt = ROUNDUPPWR2(3 * source->qcache_max);
+       else
+               kc->import_amt = ROUNDUP(NUM_BUF_PER_SLAB * obj_size, PGSIZE);
        kc->align = align;
        if (align > PGSIZE)
                panic("Cache %s object alignment is actually MIN(PGSIZE, align (%p))",
@@ -89,8 +91,6 @@ void __kmem_cache_create(struct kmem_cache *kc, const char *name,
 
 void kmem_cache_init(void)
 {
-       spinlock_init_irqsave(&kmem_caches_lock);
-       SLIST_INIT(&kmem_caches);
        __kmem_cache_create(kmem_cache_cache, "kmem_cache",
                            sizeof(struct kmem_cache),
                            __alignof__(struct kmem_cache), 0, base_arena,
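
The SLIST_INIT()/spinlock_init_irqsave() calls dropped above are redundant once kmem_caches and kmem_caches_lock carry static initializers, so both are valid before kmem_cache_init() ever runs. A minimal sketch of the same sys/queue.h pattern, with widget types invented purely for illustration:

#include <sys/queue.h>
#include <stddef.h>

struct widget {
	int val;
	SLIST_ENTRY(widget) link;
};

/* Statically initialized: usable from the first reference, with no
 * dependency on init ordering. */
static SLIST_HEAD(widget_list, widget) widgets =
	SLIST_HEAD_INITIALIZER(widgets);

void widget_add(struct widget *w)
{
	SLIST_INSERT_HEAD(&widgets, w, link);
}
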
@@ -120,14 +120,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t obj_size,
 static void kmem_slab_destroy(struct kmem_cache *cp, struct kmem_slab *a_slab)
 {
        if (!__use_bufctls(cp)) {
-               /* Deconstruct all the objects, if necessary */
-               if (cp->dtor) {
-                       void *buf = a_slab->free_small_obj;
-                       for (int i = 0; i < a_slab->num_total_obj; i++) {
-                               cp->dtor(buf, cp->obj_size);
-                               buf += a_slab->obj_size;
-                       }
-               }
                arena_free(cp->source, ROUNDDOWN(a_slab, PGSIZE), PGSIZE);
        } else {
                struct kmem_bufctl *i, *temp;
@@ -136,9 +128,6 @@ static void kmem_slab_destroy(struct kmem_cache *cp, struct kmem_slab *a_slab)
                BSD_LIST_FOREACH_SAFE(i, &a_slab->bufctl_freelist, link, temp) {
                        // Track the lowest buffer address, which is the start of the buffer
                        buf_start = MIN(buf_start, i->buf_addr);
-                       /* Deconstruct all the objects, if necessary */
-                       if (cp->dtor)
-                               cp->dtor(i->buf_addr, cp->obj_size);
                        /* This is a little dangerous, but we can skip removing, since we
                         * init the freelist when we reuse the slab. */
                        kmem_cache_free(kmem_bufctl_cache, i);
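
Both destructor loops removed above (the small-object walk and the bufctl walk) rely on the same invariant this commit introduces: objects sitting idle in a slab are unconstructed. The constructor now runs when an object is handed out and the destructor when it is returned, so kmem_slab_destroy() only deals in raw memory. A rough sketch of that lifecycle with a hypothetical payload type (not the kernel's API, just the ordering):

#include <stddef.h>

/* Hypothetical payload; the field layout is only for illustration. */
struct conn {
	int refcnt;
	void *outbuf;
};

void conn_ctor(void *obj, size_t obj_size)
{
	struct conn *c = obj;

	(void)obj_size;
	c->refcnt = 1;
	c->outbuf = NULL;
}

void conn_dtor(void *obj, size_t obj_size)
{
	(void)obj;
	(void)obj_size;
	/* tear down anything conn_ctor() set up */
}

/*
 * Lifecycle after this commit:
 *   alloc:   buffer comes off the slab freelist, then conn_ctor() runs
 *   free:    conn_dtor() runs, then the buffer goes back on the freelist
 *   destroy: kmem_slab_destroy() sees only raw, unconstructed buffers and
 *            can hand the backing memory straight back to the arena
 */
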
@@ -238,8 +227,8 @@ static struct kmem_bufctl *__yank_bufctl(struct kmem_cache *cp, void *buf)
        return bc_i;
 }
 
-/* Front end: clients of caches use these */
-void *kmem_cache_alloc(struct kmem_cache *cp, int flags)
+/* Alloc, bypassing the magazines and depot */
+static void *__kmem_alloc_from_slab(struct kmem_cache *cp, int flags)
 {
        void *retval = NULL;
        spin_lock_irqsave(&cp->cache_lock);
@@ -264,10 +253,9 @@ void *kmem_cache_alloc(struct kmem_cache *cp, int flags)
        // have a partial now (a_slab), get an item, return item
        if (!__use_bufctls(cp)) {
                retval = a_slab->free_small_obj;
-               /* adding the size of the cache_obj to get to the pointer at end of the
-                * buffer pointing to the next free_small_obj */
-               a_slab->free_small_obj = *(uintptr_t**)(a_slab->free_small_obj +
-                                                       cp->obj_size);
+               /* the next free_small_obj address is stored at the beginning of the
+                * current free_small_obj. */
+               a_slab->free_small_obj = *(uintptr_t**)(a_slab->free_small_obj);
        } else {
                // rip the first bufctl out of the partial slab's buf list
                struct kmem_bufctl *a_bufctl = BSD_LIST_FIRST(&a_slab->bufctl_freelist);
@@ -284,22 +272,33 @@ void *kmem_cache_alloc(struct kmem_cache *cp, int flags)
        }
        cp->nr_cur_alloc++;
        spin_unlock_irqsave(&cp->cache_lock);
+       if (cp->ctor)
+               cp->ctor(retval, cp->obj_size);
        return retval;
 }
 
-void kmem_cache_free(struct kmem_cache *cp, void *buf)
+void *kmem_cache_alloc(struct kmem_cache *cp, int flags)
+{
+       return __kmem_alloc_from_slab(cp, flags);
+}
+
+/* Returns an object to the slab layer.  Note that objects in the slabs are
+ * unconstructed. */
+static void __kmem_free_to_slab(struct kmem_cache *cp, void *buf)
 {
        struct kmem_slab *a_slab;
        struct kmem_bufctl *a_bufctl;
 
+       if (cp->dtor)
+               cp->dtor(buf, cp->obj_size);
        spin_lock_irqsave(&cp->cache_lock);
        if (!__use_bufctls(cp)) {
                // find its slab
                a_slab = (struct kmem_slab*)(ROUNDDOWN((uintptr_t)buf, PGSIZE) +
                                             PGSIZE - sizeof(struct kmem_slab));
-               /* write location of next free small obj to the space at the end of the
-                * buffer, then list buf as the next free small obj */
-               *(uintptr_t**)(buf + cp->obj_size) = a_slab->free_small_obj;
+               /* write location of next free small obj to the space at the beginning
+                * of the buffer, then list buf as the next free small obj */
+               *(uintptr_t**)buf = a_slab->free_small_obj;
                a_slab->free_small_obj = buf;
        } else {
                /* Give the bufctl back to the parent slab */
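
The alloc and free paths above thread the small-object freelist through the first word of each free buffer instead of a word appended past obj_size, which is what lets the extra sizeof(uintptr_t) disappear from the sizing. A self-contained sketch of that push/pop scheme (plain C, no locking or slab lookup):

#include <stddef.h>

/* Free objects store the address of the next free object in their first
 * word; once an object is handed out, that word is ordinary payload. */
struct freelist {
	void *head;
};

void fl_push(struct freelist *fl, void *obj)
{
	*(void **)obj = fl->head;
	fl->head = obj;
}

void *fl_pop(struct freelist *fl)
{
	void *obj = fl->head;

	if (obj)
		fl->head = *(void **)obj;
	return obj;
}

/* Building the list at grow time matches the loop in kmem_cache_grow()
 * further down: chain each obj_size-sized block to the next and NULL-
 * terminate the last one.  Requires obj_size >= sizeof(void *) and
 * nr_objs >= 1. */
void fl_init(struct freelist *fl, void *buf, size_t obj_size, int nr_objs)
{
	fl->head = buf;
	for (int i = 0; i < nr_objs - 1; i++) {
		*(void **)buf = (char *)buf + obj_size;
		buf = (char *)buf + obj_size;
	}
	*(void **)buf = NULL;
}
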
@@ -321,6 +320,11 @@ void kmem_cache_free(struct kmem_cache *cp, void *buf)
        spin_unlock_irqsave(&cp->cache_lock);
 }
 
+void kmem_cache_free(struct kmem_cache *cp, void *buf)
+{
+       __kmem_free_to_slab(cp, buf);
+}
+
 /* Back end: internal functions */
 /* When this returns, the cache has at least one slab in the empty list.  If
  * page_alloc fails, there are some serious issues.  This only grows by one slab
@@ -346,49 +350,40 @@ static bool kmem_cache_grow(struct kmem_cache *cp)
                // the slab struct is stored at the end of the page
                a_slab = (struct kmem_slab*)(a_page + PGSIZE
                                             - sizeof(struct kmem_slab));
-               // Need to add room for the next free item pointer in the object buffer.
-               a_slab->obj_size = ROUNDUP(cp->obj_size + sizeof(uintptr_t), cp->align);
                a_slab->num_busy_obj = 0;
                a_slab->num_total_obj = (PGSIZE - sizeof(struct kmem_slab)) /
-                                       a_slab->obj_size;
+                                       cp->obj_size;
                // TODO: consider staggering this IAW section 4.3
                a_slab->free_small_obj = a_page;
                /* Walk and create the free list, which is circular.  Each item stores
-                * the location of the next one at the end of the block. */
+                * the location of the next one at the beginning of the block. */
                void *buf = a_slab->free_small_obj;
                for (int i = 0; i < a_slab->num_total_obj - 1; i++) {
-                       // Initialize the object, if necessary
-                       if (cp->ctor)
-                               cp->ctor(buf, cp->obj_size);
-                       *(uintptr_t**)(buf + cp->obj_size) = buf + a_slab->obj_size;
-                       buf += a_slab->obj_size;
+                       *(uintptr_t**)buf = buf + cp->obj_size;
+                       buf += cp->obj_size;
                }
-               *((uintptr_t**)(buf + cp->obj_size)) = NULL;
+               *((uintptr_t**)buf) = NULL;
        } else {
                void *buf;
 
                a_slab = kmem_cache_alloc(kmem_slab_cache, 0);
                if (!a_slab)
                        return FALSE;
-               a_slab->obj_size = ROUNDUP(cp->obj_size, cp->align);
                buf = arena_alloc(cp->source, cp->import_amt, MEM_ATOMIC);
                if (!buf) {
                        kmem_cache_free(kmem_slab_cache, a_slab);
                        return FALSE;
                }
                a_slab->num_busy_obj = 0;
-               a_slab->num_total_obj = cp->import_amt / a_slab->obj_size;
+               a_slab->num_total_obj = cp->import_amt / cp->obj_size;
                BSD_LIST_INIT(&a_slab->bufctl_freelist);
                /* for each buffer, set up a bufctl and point to the buffer */
                for (int i = 0; i < a_slab->num_total_obj; i++) {
-                       // Initialize the object, if necessary
-                       if (cp->ctor)
-                               cp->ctor(buf, cp->obj_size);
                        a_bufctl = kmem_cache_alloc(kmem_bufctl_cache, 0);
                        BSD_LIST_INSERT_HEAD(&a_slab->bufctl_freelist, a_bufctl, link);
                        a_bufctl->buf_addr = buf;
                        a_bufctl->my_slab = a_slab;
-                       buf += a_slab->obj_size;
+                       buf += cp->obj_size;
                }
        }
        // add a_slab to the empty_list
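
With the per-slab obj_size field gone, slab capacity falls directly out of cp->obj_size, which already includes the alignment padding. A small sketch of the before/after arithmetic; the page size, slab-header size, and object size are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>

#define PGSIZE    4096UL   /* illustrative page size */
#define SLAB_HDR  80UL     /* stand-in for sizeof(struct kmem_slab) */
#define ROUNDUP(x, a) ((((uintptr_t)(x)) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	size_t raw_size = 128, align = 64;
	/* Old layout: a next-pointer appended past the object, then rounded. */
	size_t old_stride = ROUNDUP(raw_size + sizeof(uintptr_t), align);
	/* New layout: the pointer lives inside the object, so the stride is
	 * just the aligned object size (cp->obj_size). */
	size_t new_stride = ROUNDUP(raw_size, align);

	printf("old: %zu objs per small slab\n",
	       (size_t)((PGSIZE - SLAB_HDR) / old_stride));
	printf("new: %zu objs per small slab\n",
	       (size_t)((PGSIZE - SLAB_HDR) / new_stride));
	return 0;
}
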
@@ -420,7 +415,7 @@ void print_kmem_cache(struct kmem_cache *cp)
        spin_lock_irqsave(&cp->cache_lock);
        printk("\nPrinting kmem_cache:\n---------------------\n");
        printk("Name: %s\n", cp->name);
-       printk("Objsize: %d\n", cp->obj_size);
+       printk("Objsize (incl align): %d\n", cp->obj_size);
        printk("Align: %d\n", cp->align);
        printk("Flags: 0x%08x\n", cp->flags);
        printk("Constructor: %p\n", cp->ctor);
@@ -431,25 +426,3 @@ void print_kmem_cache(struct kmem_cache *cp)
        printk("Current Allocations: %d\n", cp->nr_cur_alloc);
        spin_unlock_irqsave(&cp->cache_lock);
 }
-
-void print_kmem_slab(struct kmem_slab *slab)
-{
-       printk("\nPrinting kmem_slab:\n---------------------\n");
-       printk("Objsize: %d (%p)\n", slab->obj_size, slab->obj_size);
-       printk("NumBusy: %d\n", slab->num_busy_obj);
-       printk("Num_total: %d\n", slab->num_total_obj);
-       /* This will break if we have a NOTOUCH small slab.  It's debugging code, so
-        * just be careful. */
-       if (slab->obj_size + sizeof(uintptr_t) < SLAB_LARGE_CUTOFF) {
-               printk("Free Small obj: %p\n", slab->free_small_obj);
-               void *buf = slab->free_small_obj;
-               for (int i = 0; i < slab->num_total_obj; i++) {
-                       printk("Addr of buf: %p, Addr of next: %p\n", buf,
-                              *((uintptr_t**)buf));
-                       buf += slab->obj_size;
-               }
-       } else {
-               printk("This is a big slab!\n");
-       }
-}
-