Fixes VFS open bug
diff --git a/kern/src/slab.c b/kern/src/slab.c
index b7ab539..d394d70 100644
@@ -107,13 +107,16 @@ static void kmem_slab_destroy(struct kmem_cache *cp, struct kmem_slab *a_slab)
                                buf += a_slab->obj_size;
                        }
                }
-               page_decref(kva2page(ROUNDDOWN(a_slab, PGSIZE)));
+               page_decref(kva2page((void*)ROUNDDOWN((uintptr_t)a_slab, PGSIZE)));
        } else {
                struct kmem_bufctl *i;
                void *page_start = (void*)-1;
-               // compute how many pages are allocated, given a power of two allocator
-               size_t num_pages = ROUNDUPPWR2(a_slab->num_total_obj * a_slab->obj_size)
-                                               / PGSIZE;
+               /* Figure out how much memory we asked for earlier.  We needed at least
+                * min_pgs.  We asked for the next highest order (power of 2) number of
+                * pages */
+               size_t min_pgs = ROUNDUP(NUM_BUF_PER_SLAB * a_slab->obj_size, PGSIZE) /
+                                        PGSIZE;
+               size_t order_pg_alloc = LOG2_UP(min_pgs);
                TAILQ_FOREACH(i, &a_slab->bufctl_freelist, link) {
                        // Track the lowest buffer address, which is the start of the buffer
                        page_start = MIN(page_start, i->buf_addr);
@@ -123,7 +126,7 @@ static void kmem_slab_destroy(struct kmem_cache *cp, struct kmem_slab *a_slab)
                        kmem_cache_free(kmem_bufctl_cache, i);
                }
                // free the pages for the slab's buffer
-               free_cont_pages(page_start, LOG2_UP(num_pages));
+               free_cont_pages(page_start, order_pg_alloc);
                // free the slab object
                kmem_cache_free(kmem_slab_cache, a_slab);
        }
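
For context, the destroy path now recomputes the allocation order the same way the grow path derives it, rather than reconstructing it from the object count. A minimal standalone sketch of that arithmetic, using stand-in definitions for PGSIZE, NUM_BUF_PER_SLAB, ROUNDUP, and LOG2_UP (the values below are illustrative, not the Akaros constants):

#include <stdio.h>
#include <stddef.h>

#define PGSIZE 4096UL
#define NUM_BUF_PER_SLAB 8
/* Stand-in for the kernel's ROUNDUP; align must be a power of two. */
#define ROUNDUP(x, align) (((x) + (align) - 1) & ~((align) - 1))

/* Stand-in for LOG2_UP: ceil(log2(x)) for x >= 1. */
static size_t log2_up(size_t x)
{
	size_t order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

int main(void)
{
	size_t obj_size = 600;	/* hypothetical object size, incl. overhead */
	/* Minimum pages needed to hold NUM_BUF_PER_SLAB objects */
	size_t min_pgs = ROUNDUP(NUM_BUF_PER_SLAB * obj_size, PGSIZE) / PGSIZE;
	/* Order handed to get_cont_pages() and later to free_cont_pages() */
	size_t order = log2_up(min_pgs);
	/* Objects per slab, based on the rounded-up amount actually allocated */
	size_t nr_objs = ((1 << order) * PGSIZE) / obj_size;

	printf("min_pgs %zu, order %zu, pages %lu, objects per slab %zu\n",
	       min_pgs, order, 1UL << order, nr_objs);
	return 0;
}

With a 600-byte object this prints min_pgs 2, order 1, 13 objects per slab, so free_cont_pages() in kmem_slab_destroy() is handed the same order that get_cont_pages() received in kmem_cache_grow().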
@@ -209,8 +212,8 @@ void kmem_cache_free(struct kmem_cache *cp, void *buf)
        spin_lock_irqsave(&cp->cache_lock);
        if (cp->obj_size <= SLAB_LARGE_CUTOFF) {
                // find its slab
-               a_slab = (struct kmem_slab*)(ROUNDDOWN(buf, PGSIZE) + PGSIZE -
-                                            sizeof(struct kmem_slab));
+               a_slab = (struct kmem_slab*)(ROUNDDOWN((uintptr_t)buf, PGSIZE) +
+                                            PGSIZE - sizeof(struct kmem_slab));
                /* write location of next free small obj to the space at the end of the
                 * buffer, then list buf as the next free small obj */
                *(uintptr_t**)(buf + cp->obj_size) = a_slab->free_small_obj;
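
The small-object path above relies on a fixed page layout: the struct kmem_slab sits at the very end of the page the objects live in, and the pointer-sized word following each object threads the slab's free list. A hedged sketch of that layout (struct fake_slab, slab_of, and small_free are illustrative names, not kernel symbols):

#include <stdint.h>
#include <stddef.h>

#define PGSIZE 4096UL

struct fake_slab {
	void *free_small_obj;	/* head of the embedded free list */
	size_t num_busy_obj;
};

static struct fake_slab *slab_of(void *buf)
{
	/* Round down to the page, then index to the header stored at its end;
	 * the arithmetic is done on a uintptr_t, the same fix the diff makes. */
	uintptr_t page = (uintptr_t)buf & ~(PGSIZE - 1);

	return (struct fake_slab *)(page + PGSIZE - sizeof(struct fake_slab));
}

static void small_free(void *buf, size_t obj_size)
{
	struct fake_slab *slab = slab_of(buf);

	/* Store the old list head in the word after the object, then make the
	 * object the new head, mirroring kmem_cache_free() above. */
	*(void **)((char *)buf + obj_size) = slab->free_small_obj;
	slab->free_small_obj = buf;
	slab->num_busy_obj--;
}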
@@ -278,15 +281,18 @@ static void kmem_cache_grow(struct kmem_cache *cp)
                a_slab = kmem_cache_alloc(kmem_slab_cache, 0);
                // TODO: hash table for back reference (BUF)
                a_slab->obj_size = ROUNDUP(cp->obj_size + sizeof(uintptr_t), cp->align);
-               // alloc n pages, such that it can hold at least 8 items
-               size_t num_pgs = ROUNDUP(NUM_BUF_PER_SLAB * a_slab->obj_size, PGSIZE) /
-                                          PGSIZE;
-               // round up for the contiguous page allocator
-               void *buf = get_cont_pages(LOG2_UP(num_pgs), 0);
+               /* Figure out how much memory we want.  We need at least min_pgs.  We'll
+                * ask for the next highest order (power of 2) number of pages */
+               size_t min_pgs = ROUNDUP(NUM_BUF_PER_SLAB * a_slab->obj_size, PGSIZE) /
+                                        PGSIZE;
+               size_t order_pg_alloc = LOG2_UP(min_pgs);
+               void *buf = get_cont_pages(order_pg_alloc, 0);
                if (!buf)
                        panic("[German Accent]: OOM for a large slab growth!!!");
                a_slab->num_busy_obj = 0;
-               a_slab->num_total_obj = ROUNDUPPWR2(num_pgs)*PGSIZE / a_slab->obj_size;
+               /* The number of objects is based on the rounded up amt requested. */
+               a_slab->num_total_obj = ((1 << order_pg_alloc) * PGSIZE) /
+                                       a_slab->obj_size;
                TAILQ_INIT(&a_slab->bufctl_freelist);
                /* for each buffer, set up a bufctl and point to the buffer */
                for (int i = 0; i < a_slab->num_total_obj; i++) {
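
In the large-object case, each buffer gets its own kmem_bufctl that records the buffer's address and links it onto the slab's freelist. A rough userspace sketch of that setup loop, using malloc and the BSD sys/queue.h TAILQ macros as stand-ins for the kernel's allocators and list types:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical stand-in for struct kmem_bufctl: one per object, pointing at
 * that object's buffer within the slab. */
struct bufctl {
	TAILQ_ENTRY(bufctl) link;
	void *buf_addr;
};
TAILQ_HEAD(bufctl_list, bufctl);

int main(void)
{
	size_t obj_size = 2048, nr_objs = 8;
	char *buf = malloc(obj_size * nr_objs);	/* stands in for get_cont_pages() */
	struct bufctl_list freelist = TAILQ_HEAD_INITIALIZER(freelist);
	struct bufctl *bc;

	/* Carve the buffer into objects; each gets a bufctl on the freelist,
	 * mirroring the per-buffer loop in kmem_cache_grow() above. */
	for (size_t i = 0; i < nr_objs; i++) {
		struct bufctl *new_bc = malloc(sizeof(*new_bc));

		new_bc->buf_addr = buf + i * obj_size;
		TAILQ_INSERT_HEAD(&freelist, new_bc, link);
	}
	TAILQ_FOREACH(bc, &freelist, link)
		printf("obj at %p\n", bc->buf_addr);
	return 0;	/* leaks are fine for this sketch */
}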
@@ -332,11 +338,11 @@ void print_kmem_cache(struct kmem_cache *cp)
        printk("Objsize: %d\n", cp->obj_size);
        printk("Align: %d\n", cp->align);
        printk("Flags: 0x%08x\n", cp->flags);
-       printk("Constructor: 0x%08x\n", cp->ctor);
-       printk("Destructor: 0x%08x\n", cp->dtor);
-       printk("Slab Full: 0x%08x\n", cp->full_slab_list);
-       printk("Slab Partial: 0x%08x\n", cp->partial_slab_list);
-       printk("Slab Empty: 0x%08x\n", cp->empty_slab_list);
+       printk("Constructor: %p\n", cp->ctor);
+       printk("Destructor: %p\n", cp->dtor);
+       printk("Slab Full: %p\n", cp->full_slab_list);
+       printk("Slab Partial: %p\n", cp->partial_slab_list);
+       printk("Slab Empty: %p\n", cp->empty_slab_list);
        printk("Current Allocations: %d\n", cp->nr_cur_alloc);
        spin_unlock_irqsave(&cp->cache_lock);
 }
@@ -348,10 +354,10 @@ void print_kmem_slab(struct kmem_slab *slab)
        printk("NumBusy: %d\n", slab->num_busy_obj);
        printk("Num_total: %d\n", slab->num_total_obj);
        if (slab->obj_size + sizeof(uintptr_t) < SLAB_LARGE_CUTOFF) {
-               printk("Free Small obj: 0x%08x\n", slab->free_small_obj);
+               printk("Free Small obj: %p\n", slab->free_small_obj);
                void *buf = slab->free_small_obj;
                for (int i = 0; i < slab->num_total_obj; i++) {
-                       printk("Addr of buf: 0x%08x, Addr of next: 0x%08x\n", buf,
+                       printk("Addr of buf: %p, Addr of next: %p\n", buf,
                               *((uintptr_t**)buf));
                        buf += slab->obj_size;
                }