Use the better hash multiplier for __generic_hash
diff --git a/kern/src/slab.c b/kern/src/slab.c
index 04c51d6..4581bcb 100644
--- a/kern/src/slab.c
+++ b/kern/src/slab.c
@@ -70,7 +70,7 @@
  *   grab a pcc lock.
  *
  * TODO:
- * - Change the sigs of ctor/dtor, add reclaim function.
+ * - Add reclaim function.
  * - When resizing, do we want to go through the depot and consolidate
  *   magazines?  (probably not a big deal.  maybe we'd deal with it when we
  *   clean up our excess mags.)
@@ -94,8 +94,9 @@
 uint64_t resize_timeout_ns = 1000000000;
 unsigned int resize_threshold = 1;
 
-struct kmem_cache_list kmem_caches = SLIST_HEAD_INITIALIZER(kmem_caches);
-spinlock_t kmem_caches_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+/* Protected by the arenas_and_slabs_lock. */
+struct kmem_cache_tailq all_kmem_caches =
+               TAILQ_HEAD_INITIALIZER(all_kmem_caches);
 
 /* Backend/internal functions, defined later.  Grab the lock before calling
  * these. */
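
The global list of caches is now a TAILQ in creation order, protected by the
same qlock that guards the arena lists (the size-ordered SLIST insert is
dropped below).  A minimal sketch of how a reader might walk it;
for_each_kmem_cache() is hypothetical, while the lock, list, and link field
are the ones introduced here:

	static void for_each_kmem_cache(void (*func)(struct kmem_cache *kc))
	{
		struct kmem_cache *kc;

		qlock(&arenas_and_slabs_lock);
		TAILQ_FOREACH(kc, &all_kmem_caches, all_kmc_link)
			func(kc);
		qunlock(&arenas_and_slabs_lock);
	}
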
@@ -229,8 +230,11 @@ static void __return_to_depot(struct kmem_cache *kc, struct kmem_magazine *mag)
  * layer. */
 static void drain_mag(struct kmem_cache *kc, struct kmem_magazine *mag)
 {
-       for (int i = 0; i < mag->nr_rounds; i++)
+       for (int i = 0; i < mag->nr_rounds; i++) {
+               if (kc->dtor)
+                       kc->dtor(mag->rounds[i], kc->priv);
                __kmem_free_to_slab(kc, mag->rounds[i]);
+       }
        mag->nr_rounds = 0;
 }
 
@@ -254,8 +258,8 @@ static struct kmem_pcpu_cache *build_pcpu_caches(void)
 void __kmem_cache_create(struct kmem_cache *kc, const char *name,
                          size_t obj_size, int align, int flags,
                          struct arena *source,
-                         void (*ctor)(void *, size_t),
-                         void (*dtor)(void *, size_t))
+                         int (*ctor)(void *, void *, int),
+                         void (*dtor)(void *, void *), void *priv)
 {
        assert(kc);
        assert(align);
@@ -278,7 +282,9 @@ void __kmem_cache_create(struct kmem_cache *kc, const char *name,
        TAILQ_INIT(&kc->empty_slab_list);
        kc->ctor = ctor;
        kc->dtor = dtor;
+       kc->priv = priv;
        kc->nr_cur_alloc = 0;
+       kc->nr_direct_allocs_ever = 0;
        kc->alloc_hash = kc->static_hash;
        hash_init_hh(&kc->hh);
        for (int i = 0; i < kc->hh.nr_hash_lists; i++)
@@ -293,28 +299,18 @@ void __kmem_cache_create(struct kmem_cache *kc, const char *name,
        /* We do this last, since this will call into the magazine cache - which we
         * could be creating on this call! */
        kc->pcpu_caches = build_pcpu_caches();
-       /* put in cache list based on it's size */
-       struct kmem_cache *i, *prev = NULL;
-       spin_lock_irqsave(&kmem_caches_lock);
-       /* find the kmem_cache before us in the list.  yes, this is O(n). */
-       SLIST_FOREACH(i, &kmem_caches, link) {
-               if (i->obj_size < kc->obj_size)
-                       prev = i;
-               else
-                       break;
-       }
-       if (prev)
-               SLIST_INSERT_AFTER(prev, kc, link);
-       else
-               SLIST_INSERT_HEAD(&kmem_caches, kc, link);
-       spin_unlock_irqsave(&kmem_caches_lock);
+       add_importing_slab(kc->source, kc);
+       qlock(&arenas_and_slabs_lock);
+       TAILQ_INSERT_TAIL(&all_kmem_caches, kc, all_kmc_link);
+       qunlock(&arenas_and_slabs_lock);
 }
 
-static void __mag_ctor(void *obj, size_t _ign)
+static int __mag_ctor(void *obj, void *priv, int flags)
 {
        struct kmem_magazine *mag = (struct kmem_magazine*)obj;
 
        mag->nr_rounds = 0;
+       return 0;
 }
 
 void kmem_cache_init(void)
@@ -325,30 +321,33 @@ void kmem_cache_init(void)
        __kmem_cache_create(kmem_magazine_cache, "kmem_magazine",
                            sizeof(struct kmem_magazine),
                            __alignof__(struct kmem_magazine), 0, base_arena,
-                           __mag_ctor, NULL);
+                           __mag_ctor, NULL, NULL);
        __kmem_cache_create(kmem_cache_cache, "kmem_cache",
                            sizeof(struct kmem_cache),
                            __alignof__(struct kmem_cache), 0, base_arena,
-                           NULL, NULL);
+                           NULL, NULL, NULL);
        __kmem_cache_create(kmem_slab_cache, "kmem_slab",
                            sizeof(struct kmem_slab),
                            __alignof__(struct kmem_slab), 0, base_arena,
-                           NULL, NULL);
+                           NULL, NULL, NULL);
        __kmem_cache_create(kmem_bufctl_cache, "kmem_bufctl",
                            sizeof(struct kmem_bufctl),
                            __alignof__(struct kmem_bufctl), 0, base_arena,
-                           NULL, NULL);
+                           NULL, NULL, NULL);
 }
 
 /* Cache management */
 struct kmem_cache *kmem_cache_create(const char *name, size_t obj_size,
                                      int align, int flags,
                                      struct arena *source,
-                                     void (*ctor)(void *, size_t),
-                                     void (*dtor)(void *, size_t))
+                                     int (*ctor)(void *, void *, int),
+                                     void (*dtor)(void *, void *),
+                                     void *priv)
 {
        struct kmem_cache *kc = kmem_cache_alloc(kmem_cache_cache, 0);
-       __kmem_cache_create(kc, name, obj_size, align, flags, source, ctor, dtor);
+
+       __kmem_cache_create(kc, name, obj_size, align, flags, source, ctor, dtor,
+                           priv);
        return kc;
 }
 
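A sketch of the new interface from a client's point of view.  The foo names
are hypothetical; kmem_cache_create(), spinlock_init(), and base_arena are
existing Akaros symbols:

	struct foo {
		spinlock_t lock;
		void *shared;
	};

	/* Runs when an object first enters the constructed state.  'priv' is
	 * whatever was passed to kmem_cache_create(). */
	static int foo_ctor(void *obj, void *priv, int flags)
	{
		struct foo *f = obj;

		spinlock_init(&f->lock);
		f->shared = priv;
		return 0;	/* non-zero fails the allocation */
	}

	struct kmem_cache *foo_cache;

	static void foo_init(void *shared)
	{
		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					      __alignof__(struct foo), 0,
					      base_arena, foo_ctor, NULL,
					      shared);
	}
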
@@ -413,6 +412,10 @@ void kmem_cache_destroy(struct kmem_cache *cp)
 {
        struct kmem_slab *a_slab, *next;
 
+       qlock(&arenas_and_slabs_lock);
+       TAILQ_REMOVE(&all_kmem_caches, cp, all_kmc_link);
+       qunlock(&arenas_and_slabs_lock);
+       del_importing_slab(cp->source, cp);
        drain_pcpu_caches(cp);
        depot_destroy(cp);
        spin_lock_irqsave(&cp->cache_lock);
@@ -427,11 +430,8 @@ void kmem_cache_destroy(struct kmem_cache *cp)
                kmem_slab_destroy(cp, a_slab);
                a_slab = next;
        }
-       spin_lock_irqsave(&kmem_caches_lock);
-       SLIST_REMOVE(&kmem_caches, cp, kmem_cache, link);
-       spin_unlock_irqsave(&kmem_caches_lock);
-       kmem_cache_free(kmem_cache_cache, cp);
        spin_unlock_irqsave(&cp->cache_lock);
+       kmem_cache_free(kmem_cache_cache, cp);
 }
 
 static void __try_hash_resize(struct kmem_cache *cp)
@@ -543,9 +543,15 @@ static void *__kmem_alloc_from_slab(struct kmem_cache *cp, int flags)
                TAILQ_INSERT_HEAD(&cp->full_slab_list, a_slab, link);
        }
        cp->nr_cur_alloc++;
+       cp->nr_direct_allocs_ever++;
        spin_unlock_irqsave(&cp->cache_lock);
-       if (cp->ctor)
-               cp->ctor(retval, cp->obj_size);
+       if (cp->ctor) {
+               if (cp->ctor(retval, cp->priv, flags)) {
+                       warn("Ctor %p failed, probably a bug!");
+                       __kmem_free_to_slab(cp, retval);
+                       return NULL;
+               }
+       }
        return retval;
 }
 
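Since the ctor can fail, a constructor that makes a nested allocation can
pass the caller's mem flags through and report failure with a non-zero
return.  A sketch with hypothetical bar names; kmalloc() is the usual
Akaros allocator:

	struct bar {
		void *scratch;
	};

	static int bar_ctor(void *obj, void *priv, int flags)
	{
		struct bar *b = obj;

		/* 256 is an arbitrary example size */
		b->scratch = kmalloc(256, flags);
		if (!b->scratch)
			return -1;	/* alloc path frees obj back to the slab */
		return 0;
	}
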
@@ -586,15 +592,13 @@ try_alloc:
        return __kmem_alloc_from_slab(kc, flags);
 }
 
-/* Returns an object to the slab layer.  Note that objects in the slabs are
- * unconstructed. */
+/* Returns an object to the slab layer.  The caller must have already run the
+ * dtor; objects in the slabs are unconstructed. */
 static void __kmem_free_to_slab(struct kmem_cache *cp, void *buf)
 {
        struct kmem_slab *a_slab;
        struct kmem_bufctl *a_bufctl;
 
-       if (cp->dtor)
-               cp->dtor(buf, cp->obj_size);
        spin_lock_irqsave(&cp->cache_lock);
        if (!__use_bufctls(cp)) {
                // find its slab
@@ -674,6 +678,8 @@ try_free:
                lock_pcu_cache(pcc);
                goto try_free;
        }
+       if (kc->dtor)
+               kc->dtor(buf, kc->priv);
        __kmem_free_to_slab(kc, buf);
 }
 
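The dtor runs only when an object leaves the constructed state: here, when a
free cannot be cached in a magazine, or in drain_mag() when a magazine is
emptied.  A matching dtor for the bar_ctor() sketch above (hypothetical):

	static void bar_dtor(void *obj, void *priv)
	{
		struct bar *b = obj;

		kfree(b->scratch);
	}

An object that bounces between kmem_cache_free() and kmem_cache_alloc()
within the pcpu layer stays constructed and never touches the ctor or dtor.
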
@@ -761,20 +767,3 @@ void kmem_cache_reap(struct kmem_cache *cp)
        }
        spin_unlock_irqsave(&cp->cache_lock);
 }
-
-void print_kmem_cache(struct kmem_cache *cp)
-{
-       spin_lock_irqsave(&cp->cache_lock);
-       printk("\nPrinting kmem_cache:\n---------------------\n");
-       printk("Name: %s\n", cp->name);
-       printk("Objsize (incl align): %d\n", cp->obj_size);
-       printk("Align: %d\n", cp->align);
-       printk("Flags: 0x%08x\n", cp->flags);
-       printk("Constructor: %p\n", cp->ctor);
-       printk("Destructor: %p\n", cp->dtor);
-       printk("Slab Full: %p\n", cp->full_slab_list);
-       printk("Slab Partial: %p\n", cp->partial_slab_list);
-       printk("Slab Empty: %p\n", cp->empty_slab_list);
-       printk("Current Allocations: %d\n", cp->nr_cur_alloc);
-       spin_unlock_irqsave(&cp->cache_lock);
-}