2 * Copyright (c) 2009 The Regents of the University of California
3 * Barret Rhoden <brho@cs.berkeley.edu>
4 * Kevin Klues <klueska@cs.berkeley.edu>
5 * See LICENSE for details.
7 * Slab allocator, based on the SunOS 5.4 allocator paper.
9 * There is a list of kmem_cache, which are the caches of objects of a given
10 * size. This list is sorted in order of size. Each kmem_cache has three
11 * lists of slabs: full, partial, and empty.
13 * For large objects, the kmem_slabs point to bufctls, which have the address
14 * of their large buffers. These slabs can consist of more than one contiguous page.
17 * For small objects, the slabs do not use the bufctls. Instead, they point to
18 * the next free object in the slab. The free objects themselves hold the
19 * address of the next free item. The slab structure is stored at the end of
20 * the page. There is only one page per slab.
22 * TODO: Note, that this is a minor pain in the ass, and worth thinking about
23 * before implementing. To keep the constructor's state valid, we can't just
24 * overwrite things, so we need to add an extra 4-8 bytes per object for the
25 * pointer, and then pass over that data when we return the actual object's
26 * address. This also might interfere with object alignment.
29 #ifndef ROS_KERN_SLAB_H
30 #define ROS_KERN_SLAB_H
32 #include <ros/common.h>
34 #include <sys/queue.h>
37 /* Back in the day, their cutoff for "large objects" was 512B, based on
38 * measurements and on not wanting more than 1/8 of internal fragmentation. */
/* Target number of buffers a small-object slab should hold; only used to
 * derive the large-object cutoff below. */
39 #define NUM_BUF_PER_SLAB 8
/* Objects strictly larger than this cutoff use the bufctl-based "large" slab
 * layout (see the WHEN() conditions on struct kmem_slab further down). */
40 #define SLAB_LARGE_CUTOFF (PGSIZE / NUM_BUF_PER_SLAB)
44 /* Control block for buffers for large-object slabs */
/* NOTE(review): the 'struct kmem_bufctl {' opener, the buffer-address member,
 * and the closing brace are not visible in this chunk; only fragments of the
 * struct body follow. */
46 TAILQ_ENTRY(kmem_bufctl) link;
48 struct kmem_slab *my_slab; /* back-link to the slab owning this buffer */
50 TAILQ_HEAD(kmem_bufctl_list, kmem_bufctl); /* list-head type for bufctls */
52 /* Slabs contain the objects. Can be either full, partial, or empty,
53 * determined by checking the number of objects busy vs total. For large
54 * slabs, the bufctl list is used to find a free buffer. For small, the void*
/* NOTE(review): the 'struct kmem_slab {' opener, several member lines, and
 * the closing brace fall outside this chunk. The WHEN() clauses below are
 * conditional annotations keyed on obj_size; the member declaration guarded
 * by the small-object clause is not visible here. */
57 TAILQ_ENTRY(kmem_slab) link;
/* Free-list of bufctls — valid only when this is a large-object slab. */
62 struct kmem_bufctl_list bufctl_freelist
63 WHEN(obj_size > SLAB_LARGE_CUTOFF);
/* Companion clause for small-object slabs (the next-free-object pointer
 * described in the file header); its declaration is outside this chunk. */
65 WHEN(obj_size <= SLAB_LARGE_CUTOFF);
68 TAILQ_HEAD(kmem_slab_list, kmem_slab); /* list-head type for slab lists */
/* NOTE(review): the 'struct kmem_cache {' opener, some members (e.g. name,
 * obj_size, alignment/flags), and the closing brace are outside this chunk. */
72 SLIST_ENTRY(kmem_cache) link; /* linkage on the global kmem_caches list */
73 spinlock_t cache_lock; /* presumably guards this cache's slab lists and
                          * counters — TODO confirm against slab.c */
/* Slabs live on exactly one of three lists according to how many of their
 * objects are busy (see the kmem_slab comment above). */
78 struct kmem_slab_list full_slab_list;
79 struct kmem_slab_list partial_slab_list;
80 struct kmem_slab_list empty_slab_list;
81 void (*ctor)(void *, size_t); /* per-object constructor: (obj, size) */
82 void (*dtor)(void *, size_t); /* per-object destructor: (obj, size) */
83 unsigned long nr_cur_alloc; /* per the name, count of objects currently
                               * allocated from this cache */
86 /* List of all kmem_caches, sorted in order of size */
87 SLIST_HEAD(kmem_cache_list, kmem_cache);
88 extern struct kmem_cache_list kmem_caches;
90 /* Cache management */
/* Creates a cache of objects of obj_size, with optional per-object
 * constructor/destructor run on each buffer.
 * NOTE(review): one parameter line of this prototype (between obj_size and
 * ctor — likely alignment/flags) is not visible in this chunk. */
91 struct kmem_cache *kmem_cache_create(const char *NTS name, size_t obj_size,
93 void (*ctor)(void *, size_t),
94 void (*dtor)(void *, size_t));
/* Tears down a cache; presumably requires all objects returned first —
 * confirm the contract in slab.c. */
95 void kmem_cache_destroy(struct kmem_cache *cp);
96 /* Front end: clients of caches use these */
/* Allocates one object from cp; 'flags' semantics are defined by the
 * allocator implementation, not visible here. */
97 void *kmem_cache_alloc(struct kmem_cache *cp, int flags);
/* Returns buf (previously obtained from kmem_cache_alloc on cp) to cp. */
98 void kmem_cache_free(struct kmem_cache *cp, void *buf);
99 /* Back end: internal functions */
/* One-time initialization of the slab subsystem. */
100 void kmem_cache_init(void);
/* Presumably reclaims unused (empty) slabs from cp — confirm in slab.c. */
101 void kmem_cache_reap(struct kmem_cache *cp);
/* Debugging helpers: dump the state of a cache / a slab. */
104 void print_kmem_cache(struct kmem_cache *kc);
105 void print_kmem_slab(struct kmem_slab *slab);
106 #endif // !ROS_KERN_SLAB_H