X-Git-Url: http://akaros.cs.berkeley.edu/gitweb/?p=akaros.git;a=blobdiff_plain;f=kern%2Fsrc%2Fkmalloc.c;h=7581fbdc2a36db1b4e35f42061d212566768b6df;hp=863f8aa58d46a532d5a9dc6137686fdf6607bfe9;hb=45d47a9f3253bb96ce6eb657eba2e1c90cbf4975;hpb=d9e49fae8107e35de0a23f1a3ceb9b03d42f61de

diff --git a/kern/src/kmalloc.c b/kern/src/kmalloc.c
index 863f8aa..7581fbd 100644
--- a/kern/src/kmalloc.c
+++ b/kern/src/kmalloc.c
@@ -1,9 +1,9 @@
-/* Copyright (c) 2009 The Regents of the University of California. 
- * See the COPYRIGHT files at the top of this source tree for full 
+/* Copyright (c) 2009 The Regents of the University of California.
+ * See the COPYRIGHT files at the top of this source tree for full
  * license information.
- * 
+ *
  * Barret Rhoden
- * Kevin Klues 
+ * Kevin Klues
  */
 #include
 #include
@@ -25,6 +25,8 @@ static void __kfree_release(struct kref *kref);
 
 void kmalloc_init(void)
 {
+	char kc_name[KMC_NAME_SZ];
+
 	/* we want at least a 16 byte alignment of the tag so that the bufs kmalloc
 	 * returns are 16 byte aligned.  we used to check the actual size == 16,
 	 * since we adjusted the KMALLOC_SMALLEST based on that. */
@@ -33,13 +35,14 @@
 	 * the actual returned buffer. */
 	size_t ksize = KMALLOC_SMALLEST;
 	for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
-		kmalloc_caches[i] = kmem_cache_create("kmalloc_cache", ksize,
-		                                      KMALLOC_ALIGNMENT, 0, 0, 0);
+		snprintf(kc_name, KMC_NAME_SZ, "kmalloc_%d", ksize);
+		kmalloc_caches[i] = kmem_cache_create(kc_name, ksize, KMALLOC_ALIGNMENT,
+		                                      0, NULL, 0, 0, NULL);
 		ksize <<= 1;
 	}
 }
 
-void *kmalloc(size_t size, int flags) 
+void *kmalloc(size_t size, int flags)
 {
 	// reserve space for bookkeeping and preserve alignment
 	size_t ksize = size + sizeof(struct kmalloc_tag);
@@ -52,15 +55,17 @@ void *kmalloc(size_t size, int flags)
 	cache_id = LOG2_UP(ksize) - LOG2_UP(KMALLOC_SMALLEST);
 	// if we don't have a cache to handle it, alloc cont pages
 	if (cache_id >= NUM_KMALLOC_CACHES) {
-		size_t num_pgs = ROUNDUP(size + sizeof(struct kmalloc_tag), PGSIZE) /
-		                 PGSIZE;
-		buf = get_cont_pages(LOG2_UP(num_pgs), flags);
+		/* The arena allocator will round up too, but we want to know in advance
+		 * so that krealloc can avoid extra allocations. */
+		size_t amt_alloc = ROUNDUP(size + sizeof(struct kmalloc_tag), PGSIZE);
+
+		buf = kpages_alloc(amt_alloc, flags);
 		if (!buf)
 			panic("Kmalloc failed!  Handle me!");
 		// fill in the kmalloc tag
 		struct kmalloc_tag *tag = buf;
 		tag->flags = KMALLOC_TAG_PAGES;
-		tag->num_pages = num_pgs;
+		tag->amt_alloc = amt_alloc;
 		tag->canary = KMALLOC_CANARY;
 		kref_init(&tag->kref, __kfree_release, 1);
 		return buf + sizeof(struct kmalloc_tag);
@@ -78,7 +83,7 @@ void *kmalloc(size_t size, int flags)
 	return buf + sizeof(struct kmalloc_tag);
 }
 
-void *kzmalloc(size_t size, int flags) 
+void *kzmalloc(size_t size, int flags)
 {
 	void *v = kmalloc(size, flags);
 	if (!v)
@@ -164,8 +169,7 @@ void *krealloc(void* buf, size_t size, int flags)
 		if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_CACHE) {
 			osize = tag->my_cache->obj_size - sizeof(struct kmalloc_tag);
 		} else if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_PAGES) {
-			osize = LOG2_UP(tag->num_pages) * PGSIZE -
-			        sizeof(struct kmalloc_tag);
+			osize = tag->amt_alloc - sizeof(struct kmalloc_tag);
 		} else {
 			panic("Probably a bad tag, flags %p\n", tag->flags);
 		}
@@ -217,7 +221,7 @@ static void __kfree_release(struct kref *kref)
 	if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_CACHE)
 		kmem_cache_free(tag->my_cache, tag);
 	else if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_PAGES)
-		free_cont_pages(tag, LOG2_UP(tag->num_pages));
+		kpages_free(tag, tag->amt_alloc);
 	else
 		panic("Bad flag 0x%x in %s", tag->flags, __FUNCTION__);
 }
@@ -241,3 +245,25 @@ void kmalloc_canary_check(char *str)
 	if (tag->canary != KMALLOC_CANARY)
 		panic("\t\t KMALLOC CANARY CHECK FAILED %s\n", str);
 }
+
+struct sized_alloc *sized_kzmalloc(size_t size, int flags)
+{
+	struct sized_alloc *sza;
+
+	sza = kzmalloc(sizeof(struct sized_alloc) + size, flags);
+	if (!sza)
+		return NULL;
+	sza->buf = sza + 1;
+	sza->size = size;
+	return sza;
+}
+
+void sza_printf(struct sized_alloc *sza, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	sza->sofar += vsnprintf(sza->buf + sza->sofar, sza->size - sza->sofar,
+	                        fmt, ap);
+	va_end(ap);
+}