/* Copyright (c) 2016 Google Inc
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * #mem, memory diagnostics (arenas and slabs)
 */

#include <sys/queue.h>

struct dev mem_devtab;

static char *devname(void)
{
	return mem_devtab.name;
}

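/* Qid paths for the #mem files.  The enum was not part of this extract; these
 * names are assumed from the dirtab and the switch statements below. */
enum {
	Qdir,
	Qarena_stats,
	Qslab_stats,
	Qfree,
	Qkmemstat,
};

/* Directory layout of #mem: a read-only directory of synthesized, read-only
 * stats files. */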
static struct dirtab mem_dir[] = {
	{".", {Qdir, 0, QTDIR}, 0, DMDIR | 0555},
	{"arena_stats", {Qarena_stats, 0, QTFILE}, 0, 0444},
	{"slab_stats", {Qslab_stats, 0, QTFILE}, 0, 0444},
	{"free", {Qfree, 0, QTFILE}, 0, 0444},
	{"kmemstat", {Qkmemstat, 0, QTFILE}, 0, 0444},
};

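/* attach, walk, and stat simply defer to the generic devtab helpers. */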
static struct chan *mem_attach(char *spec)
{
	return devattach(devname(), spec);
}

static struct walkqid *mem_walk(struct chan *c, struct chan *nc, char **name,
				unsigned int nname)
{
	return devwalk(c, nc, name, nname, mem_dir, ARRAY_SIZE(mem_dir), devgen);
}

static int mem_stat(struct chan *c, uint8_t *db, int n)
{
	return devstat(c, db, n, mem_dir, ARRAY_SIZE(mem_dir), devgen);
}

/* Prints the arena's stats into the sza buffer, starting at sofar.  Returns
 * the new sofar. */
static size_t fetch_arena_stats(struct arena *arena, struct sized_alloc *sza,
				size_t sofar)
{
	struct btag *bt_i;
	struct rb_node *rb_i;
	struct arena *a_i;
	struct kmem_cache *kc_i;

	size_t nr_allocs = 0;
	size_t nr_imports = 0;
	size_t amt_alloc = 0;
	size_t amt_free = 0;
	size_t amt_imported = 0;
	size_t empty_hash_chain = 0;
	size_t longest_hash_chain = 0;

	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Arena: %s (%p)\n--------------\n", arena->name, arena);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tquantum: %d, qcache_max: %d\n", arena->quantum,
			  arena->qcache_max);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tsource: %s\n",
			  arena->source ? arena->source->name : "none");
	spin_lock_irqsave(&arena->lock);
	for (int i = 0; i < ARENA_NR_FREE_LISTS; i++) {
		size_t j = 0;

		if (!BSD_LIST_EMPTY(&arena->free_segs[i])) {
			sofar += snprintf(sza->buf + sofar, sza->size - sofar,
					  "\tList of [2^%d - 2^%d):\n", i, i + 1);
			BSD_LIST_FOREACH(bt_i, &arena->free_segs[i], misc_link)
				j++;
			sofar += snprintf(sza->buf + sofar, sza->size - sofar,
					  "\t\tNr free segs: %d\n", j);
		}
	}
	for (int i = 0; i < arena->hh.nr_hash_lists; i++) {
		size_t j = 0;

		if (BSD_LIST_EMPTY(&arena->alloc_hash[i]))
			empty_hash_chain++;
		BSD_LIST_FOREACH(bt_i, &arena->alloc_hash[i], misc_link)
			j++;
		longest_hash_chain = MAX(longest_hash_chain, j);
	}
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tSegments:\n\t--------------\n");
	for (rb_i = rb_first(&arena->all_segs); rb_i; rb_i = rb_next(rb_i)) {
		bt_i = container_of(rb_i, struct btag, all_link);
		if (bt_i->status == BTAG_SPAN) {
			nr_imports++;
			amt_imported += bt_i->size;
		}
		if (bt_i->status == BTAG_FREE)
			amt_free += bt_i->size;
		if (bt_i->status == BTAG_ALLOC) {
			nr_allocs++;
			amt_alloc += bt_i->size;
		}
	}
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tStats:\n\t-----------------\n");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\t\tAmt free: %llu (%p)\n", amt_free, amt_free);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\t\tAmt alloc: %llu (%p), nr allocs %d\n",
			  amt_alloc, amt_alloc, nr_allocs);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\t\tAmt total segs: %llu, amt alloc segs %llu\n",
			  arena->amt_total_segs, arena->amt_alloc_segs);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\t\tAmt imported: %llu (%p), nr imports %d\n",
			  amt_imported, amt_imported, nr_imports);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\t\tNr hash %d, empty hash: %d, longest hash %d\n",
			  arena->hh.nr_hash_lists, empty_hash_chain,
			  longest_hash_chain);
	spin_unlock_irqsave(&arena->lock);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tImporting Arenas:\n\t-----------------\n");
	TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar,
				  "\t\t%s\n", a_i->name);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\tImporting Slabs:\n\t-----------------\n");
	TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar,
				  "\t\t%s\n", kc_i->name);
	return sofar;
}

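/* Builds the arena_stats file: one stats block per arena, snapshotted under
 * the arenas_and_slabs_lock. */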
static struct sized_alloc *build_arena_stats(void)
{
	struct sized_alloc *sza;
	size_t sofar = 0;
	size_t alloc_amt = 0;
	struct arena *a_i;

	qlock(&arenas_and_slabs_lock);
	/* Rough guess about how many chars per arena we'll need. */
	TAILQ_FOREACH(a_i, &all_arenas, next)
		alloc_amt += 1000;	/* per-arena estimate (assumed) */
	sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
	TAILQ_FOREACH(a_i, &all_arenas, next)
		sofar = fetch_arena_stats(a_i, sza, sofar);
	qunlock(&arenas_and_slabs_lock);
	return sza;
}

/* Prints the kmem_cache's stats into the sza buffer, starting at sofar.
 * Returns the new sofar. */
static size_t fetch_slab_stats(struct kmem_cache *kc, struct sized_alloc *sza,
			       size_t sofar)
{
	struct kmem_slab *s_i;
	struct kmem_bufctl *bc_i;

	size_t nr_unalloc_objs = 0;
	size_t empty_hash_chain = 0;
	size_t longest_hash_chain = 0;

	spin_lock_irqsave(&kc->cache_lock);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "\nKmem_cache: %s\n---------------------\n", kc->name);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Source: %s\n", kc->source->name);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Objsize (incl align): %d\n", kc->obj_size);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Align: %d\n", kc->align);
	TAILQ_FOREACH(s_i, &kc->empty_slab_list, link) {
		assert(!s_i->num_busy_obj);
		nr_unalloc_objs += s_i->num_total_obj;
	}
	TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
		nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Nr unallocated in slab layer: %lu\n", nr_unalloc_objs);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Nr allocated from slab layer: %d\n", kc->nr_cur_alloc);
	for (int i = 0; i < kc->hh.nr_hash_lists; i++) {
		size_t j = 0;

		if (BSD_LIST_EMPTY(&kc->alloc_hash[i]))
			empty_hash_chain++;
		BSD_LIST_FOREACH(bc_i, &kc->alloc_hash[i], link)
			j++;
		longest_hash_chain = MAX(longest_hash_chain, j);
	}
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Nr hash %d, empty hash: %d, longest hash %d, loadlim %d\n",
			  kc->hh.nr_hash_lists, empty_hash_chain,
			  longest_hash_chain, kc->hh.load_limit);
	spin_unlock_irqsave(&kc->cache_lock);
	spin_lock_irqsave(&kc->depot.lock);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Depot magsize: %d\n", kc->depot.magsize);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Nr empty mags: %d\n", kc->depot.nr_empty);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Nr non-empty mags: %d\n", kc->depot.nr_not_empty);
	spin_unlock_irqsave(&kc->depot.lock);
	return sofar;
}

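/* Builds the slab_stats file: one stats block per kmem_cache, snapshotted
 * under the arenas_and_slabs_lock. */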
static struct sized_alloc *build_slab_stats(void)
{
	struct sized_alloc *sza;
	size_t sofar = 0;
	size_t alloc_amt = 0;
	struct kmem_cache *kc_i;

	qlock(&arenas_and_slabs_lock);
	TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
		alloc_amt += 500;	/* per-cache estimate (assumed) */
	sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
	TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
		sofar = fetch_slab_stats(kc_i, sza, sofar);
	qunlock(&arenas_and_slabs_lock);
	return sza;
}

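/* Builds the free file: total, used, and free memory summed across the
 * arenas. */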
static struct sized_alloc *build_free(void)
{
	struct arena *a_i;
	struct sized_alloc *sza;
	size_t sofar = 0;
	size_t amt_total = 0;
	size_t amt_alloc = 0;

	sza = sized_kzmalloc(500, MEM_WAIT);
	qlock(&arenas_and_slabs_lock);
	TAILQ_FOREACH(a_i, &all_arenas, next) {
		/* Assumed: only count base arenas, so memory imported from a
		 * source arena is not counted twice. */
		if (!a_i->is_base)
			continue;
		amt_total += a_i->amt_total_segs;
		amt_alloc += a_i->amt_alloc_segs;
	}
	qunlock(&arenas_and_slabs_lock);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Total Memory : %15llu\n", amt_total);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Used Memory  : %15llu\n", amt_alloc);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  "Free Memory  : %15llu\n", amt_total - amt_alloc);
	return sza;
}

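/* kmemstat is a one-row-per-entity table covering every arena and slab.  The
 * column widths below must match kmemstat_fmt and kmemstat_hdr_fmt. */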
#define KMEMSTAT_NAME 30
#define KMEMSTAT_OBJSIZE 8
#define KMEMSTAT_TOTAL 15
#define KMEMSTAT_ALLOCED 15
#define KMEMSTAT_NR_ALLOCS 12
#define KMEMSTAT_LINE_LN (8 + KMEMSTAT_NAME + KMEMSTAT_OBJSIZE + KMEMSTAT_TOTAL\
			  + KMEMSTAT_ALLOCED + KMEMSTAT_NR_ALLOCS)

const char kmemstat_fmt[] = "%-*s: %c :%*llu:%*llu:%*llu:%*llu\n";
const char kmemstat_hdr_fmt[] = "%-*s:Typ:%*s:%*s:%*s:%*s\n";

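/* Emits one kmemstat row for an arena, indented to show its depth in the
 * import hierarchy. */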
static size_t fetch_arena_line(struct arena *arena, struct sized_alloc *sza,
			       size_t sofar, int indent)
{
	for (int i = 0; i < indent; i++)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar, "    ");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  kmemstat_fmt,
			  KMEMSTAT_NAME - indent * 4, arena->name,
			  'A',	/* type column; the 'A' for arena is assumed */
			  KMEMSTAT_OBJSIZE, arena->quantum,
			  KMEMSTAT_TOTAL, arena->amt_total_segs,
			  KMEMSTAT_ALLOCED, arena->amt_alloc_segs,
			  KMEMSTAT_NR_ALLOCS, arena->nr_allocs_ever);
	return sofar;
}

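/* Emits one kmemstat row for a kmem_cache, reporting byte amounts derived
 * from obj_size and the object counts gathered from the slab layer. */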
static size_t fetch_slab_line(struct kmem_cache *kc, struct sized_alloc *sza,
			      size_t sofar, int indent)
{
	struct kmem_pcpu_cache *pcc;
	struct kmem_slab *s_i;
	size_t nr_unalloc_objs = 0;
	size_t nr_allocs_ever = 0;

	spin_lock_irqsave(&kc->cache_lock);
	TAILQ_FOREACH(s_i, &kc->empty_slab_list, link)
		nr_unalloc_objs += s_i->num_total_obj;
	TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
		nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
	spin_unlock_irqsave(&kc->cache_lock);
	/* Lockless peek at the pcpu state */
	for (int i = 0; i < kmc_nr_pcpu_caches(); i++) {
		pcc = &kc->pcpu_caches[i];
		nr_allocs_ever += pcc->nr_allocs_ever;
	}

	for (int i = 0; i < indent; i++)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar, "    ");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  kmemstat_fmt,
			  KMEMSTAT_NAME - indent * 4, kc->name,
			  'S',	/* type column; the 'S' for slab is assumed */
			  KMEMSTAT_OBJSIZE, kc->obj_size,
			  KMEMSTAT_TOTAL, kc->obj_size * (nr_unalloc_objs +
							  kc->nr_cur_alloc),
			  KMEMSTAT_ALLOCED, kc->obj_size * kc->nr_cur_alloc,
			  KMEMSTAT_NR_ALLOCS, nr_allocs_ever);
	return sofar;
}

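/* Recursively prints an arena, then every arena and slab that imports from
 * it, each one indent level deeper. */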
static size_t fetch_arena_and_kids(struct arena *arena, struct sized_alloc *sza,
				   size_t sofar, int indent)
{
	struct arena *a_i;
	struct kmem_cache *kc_i;

	sofar = fetch_arena_line(arena, sza, sofar, indent);
	TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link)
		sofar = fetch_arena_and_kids(a_i, sza, sofar, indent + 1);
	TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
		sofar = fetch_slab_line(kc_i, sza, sofar, indent + 1);
	return sofar;
}

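/* Builds the kmemstat file: a header row, a separator line, then the
 * arena/slab hierarchy rooted at each top-level arena. */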
static struct sized_alloc *build_kmemstat(void)
{
	struct arena *a_i;
	struct kmem_cache *kc_i;
	struct sized_alloc *sza;
	size_t sofar = 0;
	size_t alloc_amt = 100;

	qlock(&arenas_and_slabs_lock);
	TAILQ_FOREACH(a_i, &all_arenas, next)
		alloc_amt += KMEMSTAT_LINE_LN + 1;	/* per-row estimate (assumed) */
	TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
		alloc_amt += KMEMSTAT_LINE_LN + 1;	/* per-row estimate (assumed) */
	sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
			  kmemstat_hdr_fmt,
			  KMEMSTAT_NAME, "Arena/Slab Name",
			  KMEMSTAT_OBJSIZE, "Objsize",
			  KMEMSTAT_TOTAL, "Total Amt",
			  KMEMSTAT_ALLOCED, "Alloc Amt",
			  KMEMSTAT_NR_ALLOCS, "Allocs Ever");
	for (int i = 0; i < KMEMSTAT_LINE_LN; i++)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar, "-");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar, "\n");
	TAILQ_FOREACH(a_i, &all_arenas, next) {
		/* Assumed: only start from arenas with no source; importing
		 * arenas are reached recursively as kids. */
		if (a_i->source)
			continue;
		sofar = fetch_arena_and_kids(a_i, sza, sofar, 0);
	}
	qunlock(&arenas_and_slabs_lock);
	return sza;
}

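/* Opening a stats file synthesizes its contents into c->synth_buf; reads then
 * serve from that snapshot until the chan is closed. */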
static struct chan *mem_open(struct chan *c, int omode)
{
	if (c->qid.type & QTDIR) {
		if (openmode(omode) != O_READ)
			error(EPERM, "Tried opening directory not read-only");
	}
	switch (c->qid.path) {
	case Qarena_stats:
		c->synth_buf = build_arena_stats();
		break;
	case Qslab_stats:
		c->synth_buf = build_slab_stats();
		break;
	case Qfree:
		c->synth_buf = build_free();
		break;
	case Qkmemstat:
		c->synth_buf = build_kmemstat();
		break;
	}
	c->mode = openmode(omode);
	c->flag |= COPEN;
	c->offset = 0;
	return c;
}

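/* Closing a stats file releases the snapshot built at open time. */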
static void mem_close(struct chan *c)
{
	if (!(c->flag & COPEN))
		return;
	switch (c->qid.path) {
	/* Assumed: each stats file frees its synthesized buffer here. */
	case Qarena_stats:
	case Qslab_stats:
	case Qfree:
	case Qkmemstat:
		kfree(c->synth_buf);
		c->synth_buf = NULL;
		break;
	}
}

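/* Reads serve the directory listing for Qdir and the synthesized snapshot for
 * everything else. */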
static long mem_read(struct chan *c, void *ubuf, long n, int64_t offset)
{
	struct sized_alloc *sza;

	switch (c->qid.path) {
	case Qdir:
		return devdirread(c, ubuf, n, mem_dir, ARRAY_SIZE(mem_dir),
				  devgen);
	case Qarena_stats:
	case Qslab_stats:
	case Qfree:
	case Qkmemstat:
		sza = c->synth_buf;
		return readmem(offset, ubuf, n, sza->buf, sza->size);
	default:
		panic("Bad Qid %p!", c->qid.path);
	}
	return -1;
}

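/* #mem is read-only; any write attempt raises an error. */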
static long mem_write(struct chan *c, void *ubuf, long n, int64_t offset)
{
	switch (c->qid.path) {
	default:
		error(EFAIL, "Unable to write to %s", devname());
	}
	return n;
}

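/* Device table for #mem.  The generic dev* helpers are assumed for the hooks
 * this file does not implement itself. */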
struct dev mem_devtab __devtab = {
	.name = "mem",
	.reset = devreset,
	.init = devinit,
	.shutdown = devshutdown,
	.attach = mem_attach,
	.walk = mem_walk,
	.stat = mem_stat,
	.open = mem_open,
	.create = devcreate,
	.close = mem_close,
	.read = mem_read,
	.bread = devbread,
	.write = mem_write,
	.bwrite = devbwrite,
	.remove = devremove,
	.wstat = devwstat,
	.power = devpower,
	.chaninfo = devchaninfo,
};