/* Copyright (c) 2009 The Regents of the University of California.
 * See the COPYRIGHT files at the top of this source tree for full
 * license information.
 *
 * Barret Rhoden <brho@cs.berkeley.edu>
 * Kevin Klues <klueska@cs.berkeley.edu> */

#include <ros/common.h>
#include <kmalloc.h>
#include <slab.h>
#include <pmap.h>
#include <stdio.h>
#include <assert.h>
#define kmallocdebug(args...)  //printk(args)

// List of physical pages used by kmalloc
static spinlock_t pages_list_lock = SPINLOCK_INITIALIZER;
static page_list_t pages_list;

struct kmem_cache *kmalloc_caches[NUM_KMALLOC_CACHES];

static void __kfree_release(struct kref *kref);
void kmalloc_init(void)
{
	char kc_name[KMC_NAME_SZ];

	/* We want at least a 16 byte alignment of the tag so that the bufs
	 * kmalloc returns are 16 byte aligned.  We used to check that the
	 * actual size was 16, since KMALLOC_SMALLEST was adjusted based on
	 * that. */
	static_assert(ALIGNED(sizeof(struct kmalloc_tag), 16));
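	/* Illustrative note: because the tag size is a multiple of 16, a slab
	 * object that itself starts on a 16 byte boundary hands the caller
	 * buf = obj + sizeof(struct kmalloc_tag), which is still 16 byte
	 * aligned with no extra padding needed. */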
	/* Build caches of the common sizes.  Each size covers both the tag and
	 * the buffer that actually gets returned to the caller. */
	size_t ksize = KMALLOC_SMALLEST;

	for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
		snprintf(kc_name, KMC_NAME_SZ, "kmalloc_%zu", ksize);
		kmalloc_caches[i] = kmem_cache_create(kc_name, ksize, KMALLOC_ALIGNMENT,
void *kmalloc(size_t size, int flags)
{
	// reserve space for bookkeeping and preserve alignment
	size_t ksize = size + sizeof(struct kmalloc_tag);
	void *buf;
	int cache_id;

	// determine the cache to pull from
	if (ksize <= KMALLOC_SMALLEST)
		cache_id = 0;
	else
		cache_id = LOG2_UP(ksize) - LOG2_UP(KMALLOC_SMALLEST);
	// if we don't have a cache to handle it, alloc cont pages
	if (cache_id >= NUM_KMALLOC_CACHES) {
		/* The arena allocator will round up too, but we want to know in
		 * advance so that krealloc can avoid extra allocations. */
		size_t amt_alloc = ROUNDUP(size + sizeof(struct kmalloc_tag), PGSIZE);

		buf = kpages_alloc(amt_alloc, flags);
		if (!buf)
			panic("Kmalloc failed! Handle me!");
		// fill in the kmalloc tag
		struct kmalloc_tag *tag = buf;

		tag->flags = KMALLOC_TAG_PAGES;
		tag->amt_alloc = amt_alloc;
		tag->canary = KMALLOC_CANARY;
		kref_init(&tag->kref, __kfree_release, 1);
		return buf + sizeof(struct kmalloc_tag);
	}
	// else, alloc from the appropriate cache
	buf = kmem_cache_alloc(kmalloc_caches[cache_id], flags);
	if (!buf)
		panic("Kmalloc failed! Handle me!");
	// store a pointer to the buffer's kmem_cache in its bookkeeping space
	struct kmalloc_tag *tag = buf;

	tag->flags = KMALLOC_TAG_CACHE;
	tag->my_cache = kmalloc_caches[cache_id];
	tag->canary = KMALLOC_CANARY;
	kref_init(&tag->kref, __kfree_release, 1);
	return buf + sizeof(struct kmalloc_tag);
}
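/* Worked example of the size-class selection above, assuming (purely for
 * illustration) that KMALLOC_SMALLEST == 64, NUM_KMALLOC_CACHES == 6, and
 * sizeof(struct kmalloc_tag) == 16; the real values live in kmalloc.h.
 *
 *	kmalloc(100, flags):  ksize = 116, LOG2_UP(116) - LOG2_UP(64) = 7 - 6,
 *	                      so cache_id = 1 and the "kmalloc_128" cache is
 *	                      used.
 *	kmalloc(8192, flags): cache_id lands past NUM_KMALLOC_CACHES, so the
 *	                      request falls through to kpages_alloc(), rounded
 *	                      up to a multiple of PGSIZE. */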
void *kzmalloc(size_t size, int flags)
{
	void *v = kmalloc(size, flags);

	memset(v, 0, size);
	return v;
}
void *kmalloc_align(size_t size, int flags, size_t align)
{
	void *addr, *retaddr;
	int *tag_flags, offset;

	/* alignment requests must be a multiple of sizeof(long), even though
	 * we only need an int in the current code. */
	assert(ALIGNED(align, sizeof(long)));
	/* must fit in the space reserved for the offset amount, which is at
	 * most 32 - KMALLOC_ALIGN_SHIFT bits. */
	assert(align < (1 << (32 - KMALLOC_ALIGN_SHIFT)));
	assert(IS_PWR2(align));
	addr = kmalloc(size + align, flags);
	if (!addr)
		return 0;
	if (ALIGNED(addr, align))
		return addr;
	retaddr = ROUNDUP(addr, align);
	offset = retaddr - addr;
	assert(offset < align);
	/* we might not have room for a full tag.  we might have only 8 bytes.
	 * but we'll at least have room for the flags part. */
	tag_flags = (int*)(retaddr - sizeof(int));
	*tag_flags = (offset << KMALLOC_ALIGN_SHIFT) | KMALLOC_TAG_UNALIGN;
	return retaddr;
}
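/* Sketch of the layout kmalloc_align() leaves behind when the underlying
 * buffer needed realignment (spacing is illustrative, not to scale):
 *
 *	full kmalloc tag | addr ... padding ... | int tag_flags | retaddr ...
 *	                   ^                                      ^
 *	                   buf returned by kmalloc()              buf returned
 *	                                                          to the caller
 *
 * tag_flags, the int just below retaddr, holds
 * (offset << KMALLOC_ALIGN_SHIFT) | KMALLOC_TAG_UNALIGN with
 * offset == retaddr - addr, so kfree() and friends can walk back to addr and
 * find the full tag that sits just below it. */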
void *kzmalloc_align(size_t size, int flags, size_t align)
{
	void *v = kmalloc_align(size, flags, align);

	if (v)
		memset(v, 0, size);
	return v;
}
static struct kmalloc_tag *__get_km_tag(void *buf)
{
	struct kmalloc_tag *tag = (struct kmalloc_tag*)(buf -
	                                            sizeof(struct kmalloc_tag));

	if (tag->canary != KMALLOC_CANARY) {
		printk("__get_km_tag bad canary: %08lx@%p, buf %p, expected %08lx\n",
		       tag->canary, &tag->canary, buf, KMALLOC_CANARY);
		hexdump((void *)(buf - sizeof(struct kmalloc_tag)), 256);
		panic("Bad canary");
	}
	return tag;
}
/* If we kmalloc_aligned, the buf we got back (and are now trying to perform
 * some operation on) might not be the original, underlying, unaligned buf.
 *
 * This returns the underlying, unaligned buf, or 0 if the buf was not
 * realigned in the first place. */
static void *__get_unaligned_orig_buf(void *buf)
{
	int *tag_flags = (int*)(buf - sizeof(int));

	if ((*tag_flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_UNALIGN)
		return (buf - (*tag_flags >> KMALLOC_ALIGN_SHIFT));
	return 0;
}
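/* For example, if kmalloc_align() stored an offset of 24 above a returned
 * buffer, __get_unaligned_orig_buf(buf) hands back buf - 24, the pointer the
 * underlying kmalloc() produced; a buffer that never went through
 * kmalloc_align() yields 0 and callers just keep using buf unchanged. */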
void *krealloc(void* buf, size_t size, int flags)
{
	void *nbuf;
	size_t osize = 0;
	struct kmalloc_tag *tag;

	if (buf) {
		if (__get_unaligned_orig_buf(buf))
			panic("krealloc of a kmalloc_align not supported");
		tag = __get_km_tag(buf);
		/* whatever we got from either a slab or the page allocator is
		 * meant for both the buf+size as well as the kmalloc tag */
		if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_CACHE) {
			osize = tag->my_cache->obj_size - sizeof(struct kmalloc_tag);
		} else if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_PAGES) {
			osize = tag->amt_alloc - sizeof(struct kmalloc_tag);
		} else {
			panic("Probably a bad tag, flags %p\n", tag->flags);
		}
		/* the existing allocation already has room; keep using it */
		if (osize >= size)
			return buf;
	}

	nbuf = kmalloc(size, flags);

	/* It would be more interesting to use error(...) here, but in any
	 * event, NEVER destroy buf! */
	if (!nbuf)
		return NULL;
	if (osize)
		memmove(nbuf, buf, osize);
	kfree(buf);
	return nbuf;
}
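/* Typical krealloc() usage (sketch; 'flags' stands in for whatever allocation
 * flags the caller already passes to kmalloc()):
 *
 *	char *p = kmalloc(64, flags);
 *	...
 *	p = krealloc(p, 4096, flags);
 *
 * If the original slab object or page allocation already had room for the new
 * size, the same pointer comes straight back; otherwise a new buffer is
 * kmalloc()ed, the old contents are copied over, and the old buffer is
 * kfree()d. */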
/* Grabs a reference on a buffer.  Release with kfree().
 *
 * Note that a krealloc on a buffer with ref > 1 that needs a new, underlying
 * buffer will result in two buffers existing.  In this case, the krealloc is a
 * kmalloc and a kfree, but that kfree does not completely free since the
 * original ref > 1. */
void kmalloc_incref(void *buf)
{
	void *orig_buf = __get_unaligned_orig_buf(buf);

	buf = orig_buf ? orig_buf : buf;
	/* if we want a smaller tag, we can extract the code from kref and
	 * manually set the release method in kfree. */
	kref_get(&__get_km_tag(buf)->kref, 1);
}
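/* Refcounting sketch: every kmalloc() buffer starts with one reference and
 * each kfree() drops one, so a secondary holder pairs kmalloc_incref() with
 * its own kfree():
 *
 *	void *buf = kmalloc(128, flags);	// refcnt == 1
 *	kmalloc_incref(buf);			// refcnt == 2
 *	kfree(buf);				// refcnt == 1, buf still valid
 *	kfree(buf);				// refcnt == 0, storage released
 */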
int kmalloc_refcnt(void *buf)
{
	void *orig_buf = __get_unaligned_orig_buf(buf);

	buf = orig_buf ? orig_buf : buf;
	return kref_refcnt(&__get_km_tag(buf)->kref);
}
static void __kfree_release(struct kref *kref)
{
	struct kmalloc_tag *tag = container_of(kref, struct kmalloc_tag, kref);

	if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_CACHE)
		kmem_cache_free(tag->my_cache, tag);
	else if ((tag->flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_PAGES)
		kpages_free(tag, tag->amt_alloc);
	else
		panic("Bad flag 0x%x in %s", tag->flags, __FUNCTION__);
}
void kfree(void *buf)
{
	void *orig_buf;

	if (!buf)
		return;
	orig_buf = __get_unaligned_orig_buf(buf);
	buf = orig_buf ? orig_buf : buf;
	kref_put(&__get_km_tag(buf)->kref);
}
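/* Release-path sketch: the final kref_put() above invokes __kfree_release(),
 * which looks at tag->flags to route the memory back to where it came from:
 * KMALLOC_TAG_CACHE buffers return to tag->my_cache via kmem_cache_free(),
 * and KMALLOC_TAG_PAGES buffers go back through kpages_free() with the
 * originally recorded tag->amt_alloc.  Either way, the tag, not the
 * caller-visible buf, is the address being freed, since the tag is the true
 * start of the allocation. */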
void kmalloc_canary_check(char *str)
{
	if (!debug_canary)
		return;
	struct kmalloc_tag *tag = (struct kmalloc_tag*)(debug_canary -
	                                            sizeof(struct kmalloc_tag));

	if (tag->canary != KMALLOC_CANARY)
		panic("\t\t KMALLOC CANARY CHECK FAILED %s\n", str);
}
struct sized_alloc *sized_kzmalloc(size_t size, int flags)
{
	struct sized_alloc *sza;

	sza = kzmalloc(sizeof(struct sized_alloc) + size, flags);