slab: Properly account for allocs from slabs
[akaros.git] / kern / drivers / dev / mem.c
1 /* Copyright (c) 2016 Google Inc
2  * Barret Rhoden <brho@cs.berkeley.edu>
3  * See LICENSE for details.
4  *
5  * #mem, memory diagnostics (arenas and slabs)
6  */
7
8 #include <ns.h>
9 #include <kmalloc.h>
10 #include <string.h>
11 #include <stdio.h>
12 #include <assert.h>
13 #include <error.h>
14 #include <syscall.h>
15 #include <sys/queue.h>
16
17 struct dev mem_devtab;
18
/* Returns this device's name as registered in mem_devtab ("mem"). */
static char *devname(void)
{
	return mem_devtab.name;
}
23
/* Qid paths for the entries of this device's directory (see mem_dir). */
enum {
	Qdir,		/* the #mem directory itself */
	Qarena_stats,	/* detailed per-arena diagnostics */
	Qslab_stats,	/* detailed per-kmem_cache diagnostics */
	Qfree,		/* total/used/free summary over base arenas */
	Qkmemstat,	/* one-row-per-arena/slab overview table */
};
31
/* Directory table for #mem.  All files are read-only; their contents are
 * synthesized into c->synth_buf at open time (see mem_open). */
static struct dirtab mem_dir[] = {
	{".", {Qdir, 0, QTDIR}, 0, DMDIR | 0555},
	{"arena_stats", {Qarena_stats, 0, QTFILE}, 0, 0444},
	{"slab_stats", {Qslab_stats, 0, QTFILE}, 0, 0444},
	{"free", {Qfree, 0, QTFILE}, 0, 0444},
	{"kmemstat", {Qkmemstat, 0, QTFILE}, 0, 0444},
};
39
/* Devtab attach: returns a new chan rooted at this device. */
static struct chan *mem_attach(char *spec)
{
	return devattach(devname(), spec);
}
44
/* Devtab walk: generic directory walk over mem_dir. */
static struct walkqid *mem_walk(struct chan *c, struct chan *nc, char **name,
								int nname)
{
	return devwalk(c, nc, name, nname, mem_dir, ARRAY_SIZE(mem_dir), devgen);
}
50
/* Devtab stat: generic stat over mem_dir. */
static int mem_stat(struct chan *c, uint8_t *db, int n)
{
	return devstat(c, db, n, mem_dir, ARRAY_SIZE(mem_dir), devgen);
}
55
56 /* Prints arena's stats to the sza, starting at sofar.  Returns the new sofar.*/
57 static size_t fetch_arena_stats(struct arena *arena, struct sized_alloc *sza,
58                                 size_t sofar)
59 {
60         struct btag *bt_i;
61         struct rb_node *rb_i;
62         struct arena *a_i;
63         struct kmem_cache *kc_i;
64
65         size_t nr_allocs = 0;
66         size_t nr_imports = 0;
67         size_t amt_alloc = 0;
68         size_t amt_free = 0;
69         size_t amt_imported = 0;
70         size_t empty_hash_chain = 0;
71         size_t longest_hash_chain = 0;
72
73         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
74                           "Arena: %s (%p)\n--------------\n", arena->name, arena);
75         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
76                           "\tquantum: %d, qcache_max: %d\n", arena->quantum,
77                           arena->qcache_max);
78         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
79                           "\tsource: %s\n",
80                           arena->source ? arena->source->name : "none");
81         spin_lock_irqsave(&arena->lock);
82         for (int i = 0; i < ARENA_NR_FREE_LISTS; i++) {
83                 int j = 0;
84
85                 if (!BSD_LIST_EMPTY(&arena->free_segs[i])) {
86                         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
87                                           "\tList of [2^%d - 2^%d):\n", i, i + 1);
88                         BSD_LIST_FOREACH(bt_i, &arena->free_segs[i], misc_link)
89                                 j++;
90                         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
91                                           "\t\tNr free segs: %d\n", j);
92                 }
93         }
94         for (int i = 0; i < arena->hh.nr_hash_lists; i++) {
95                 int j = 0;
96
97                 if (BSD_LIST_EMPTY(&arena->alloc_hash[i]))
98                         empty_hash_chain++;
99                 BSD_LIST_FOREACH(bt_i, &arena->alloc_hash[i], misc_link)
100                         j++;
101                 longest_hash_chain = MAX(longest_hash_chain, j);
102         }
103         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
104                           "\tSegments:\n\t--------------\n");
105         for (rb_i = rb_first(&arena->all_segs); rb_i; rb_i = rb_next(rb_i)) {
106                 bt_i = container_of(rb_i, struct btag, all_link);
107                 if (bt_i->status == BTAG_SPAN) {
108                         nr_imports++;
109                         amt_imported += bt_i->size;
110                 }
111                 if (bt_i->status == BTAG_FREE)
112                         amt_free += bt_i->size;
113                 if (bt_i->status == BTAG_ALLOC) {
114                         nr_allocs++;
115                         amt_alloc += bt_i->size;
116                 }
117         }
118         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
119                           "\tStats:\n\t-----------------\n");
120         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
121                           "\t\tAmt free: %llu (%p)\n", amt_free, amt_free);
122         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
123                           "\t\tAmt alloc: %llu (%p), nr allocs %d\n",
124                           amt_alloc, amt_alloc, nr_allocs);
125         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
126                           "\t\tAmt total segs: %llu, amt alloc segs %llu\n",
127                           arena->amt_total_segs, arena->amt_alloc_segs);
128         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
129                           "\t\tAmt imported: %llu (%p), nr imports %d\n",
130                                           amt_imported, amt_imported, nr_imports);
131         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
132                           "\t\tNr hash %d, empty hash: %d, longest hash %d\n",
133                           arena->hh.nr_hash_lists, empty_hash_chain,
134                                           longest_hash_chain);
135         spin_unlock_irqsave(&arena->lock);
136         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
137                           "\tImporting Arenas:\n\t-----------------\n");
138         TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link)
139                 sofar += snprintf(sza->buf + sofar, sza->size - sofar,
140                                   "\t\t%s\n", a_i->name);
141         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
142                           "\tImporting Slabs:\n\t-----------------\n");
143         TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
144                 sofar += snprintf(sza->buf + sofar, sza->size - sofar,
145                                   "\t\t%s\n", kc_i->name);
146         return sofar;
147 }
148
149 static struct sized_alloc *build_arena_stats(void)
150 {
151         struct sized_alloc *sza;
152         size_t sofar = 0;
153         size_t alloc_amt = 0;
154         struct arena *a_i;
155
156         qlock(&arenas_and_slabs_lock);
157         /* Rough guess about how many chars per arena we'll need. */
158         TAILQ_FOREACH(a_i, &all_arenas, next)
159                 alloc_amt += 1000;
160         sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
161         TAILQ_FOREACH(a_i, &all_arenas, next)
162                 sofar = fetch_arena_stats(a_i, sza, sofar);
163         qunlock(&arenas_and_slabs_lock);
164         return sza;
165 }
166
167 /* Prints arena's stats to the sza, starting at sofar.  Returns the new sofar.*/
168 static size_t fetch_slab_stats(struct kmem_cache *kc, struct sized_alloc *sza,
169                                size_t sofar)
170 {
171         struct kmem_slab *s_i;
172         struct kmem_bufctl *bc_i;
173
174         size_t nr_unalloc_objs = 0;
175         size_t empty_hash_chain = 0;
176         size_t longest_hash_chain = 0;
177
178         spin_lock_irqsave(&kc->cache_lock);
179         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
180                           "\nKmem_cache: %s\n---------------------\n", kc->name);
181         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
182                           "Source: %s\n", kc->source->name);
183         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
184                           "Objsize (incl align): %d\n", kc->obj_size);
185         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
186                           "Align: %d\n", kc->align);
187         TAILQ_FOREACH(s_i, &kc->empty_slab_list, link) {
188                 assert(!s_i->num_busy_obj);
189                 nr_unalloc_objs += s_i->num_total_obj;
190         }
191         TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
192                 nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
193         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
194                           "Nr unallocated in slab layer: %lu\n", nr_unalloc_objs);
195         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
196                           "Nr allocated from slab layer: %d\n", kc->nr_cur_alloc);
197         for (int i = 0; i < kc->hh.nr_hash_lists; i++) {
198                 int j = 0;
199
200                 if (BSD_LIST_EMPTY(&kc->alloc_hash[i]))
201                         empty_hash_chain++;
202                 BSD_LIST_FOREACH(bc_i, &kc->alloc_hash[i], link)
203                         j++;
204                 longest_hash_chain = MAX(longest_hash_chain, j);
205         }
206         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
207                           "Nr hash %d, empty hash: %d, longest hash %d, loadlim %d\n",
208                           kc->hh.nr_hash_lists, empty_hash_chain,
209                                           longest_hash_chain, kc->hh.load_limit);
210         spin_unlock_irqsave(&kc->cache_lock);
211         spin_lock_irqsave(&kc->depot.lock);
212         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
213                           "Depot magsize: %d\n", kc->depot.magsize);
214         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
215                           "Nr empty mags: %d\n", kc->depot.nr_empty);
216         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
217                           "Nr non-empty mags: %d\n", kc->depot.nr_not_empty);
218         spin_unlock_irqsave(&kc->depot.lock);
219         return sofar;
220 }
221
222 static struct sized_alloc *build_slab_stats(void)
223 {
224         struct sized_alloc *sza;
225         size_t sofar = 0;
226         size_t alloc_amt = 0;
227         struct kmem_cache *kc_i;
228
229         qlock(&arenas_and_slabs_lock);
230         TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
231                 alloc_amt += 500;
232         sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
233         TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
234                 sofar = fetch_slab_stats(kc_i, sza, sofar);
235         qunlock(&arenas_and_slabs_lock);
236         return sza;
237 }
238
239 static struct sized_alloc *build_free(void)
240 {
241         struct arena *a_i;
242         struct sized_alloc *sza;
243         size_t sofar = 0;
244         size_t amt_total = 0;
245         size_t amt_alloc = 0;
246
247         sza = sized_kzmalloc(500, MEM_WAIT);
248         qlock(&arenas_and_slabs_lock);
249         TAILQ_FOREACH(a_i, &all_arenas, next) {
250                 if (!a_i->is_base)
251                         continue;
252                 amt_total += a_i->amt_total_segs;
253                 amt_alloc += a_i->amt_alloc_segs;
254         }
255         qunlock(&arenas_and_slabs_lock);
256         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
257                           "Total Memory : %15llu\n", amt_total);
258         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
259                           "Used Memory  : %15llu\n", amt_alloc);
260         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
261                           "Free Memory  : %15llu\n", amt_total - amt_alloc);
262         return sza;
263 }
264
/* Column widths (chars) for the kmemstat table. */
#define KMEMSTAT_NAME			30
#define KMEMSTAT_OBJSIZE		8
#define KMEMSTAT_TOTAL			15
#define KMEMSTAT_ALLOCED		15
#define KMEMSTAT_NR_ALLOCS		12
/* Full row width: the columns plus 8 chars of fixed separators/newline in
 * kmemstat_fmt below; used to draw the header underline. */
#define KMEMSTAT_LINE_LN (8 + KMEMSTAT_NAME + KMEMSTAT_OBJSIZE + KMEMSTAT_TOTAL\
                          + KMEMSTAT_ALLOCED + KMEMSTAT_NR_ALLOCS)

/* Row format: name, type char ('A' arena / 'S' slab), then four numeric
 * columns.  The '*' widths are filled from the KMEMSTAT_* constants. */
const char kmemstat_fmt[]     = "%-*s: %c :%*llu:%*llu:%*llu:%*llu\n";
const char kmemstat_hdr_fmt[] = "%-*s:Typ:%*s:%*s:%*s:%*s\n";
275
/* Emits one kmemstat row for an arena, indented 'indent' levels (4 spaces
 * each; the name column shrinks to keep the table aligned).  Returns the new
 * sofar.
 *
 * NOTE(review): reads the arena counters without taking arena->lock.
 * Callers hold arenas_and_slabs_lock, so the arena won't disappear, but the
 * counters themselves may be mid-update - presumably acceptable for a
 * diagnostic snapshot; confirm. */
static size_t fetch_arena_line(struct arena *arena, struct sized_alloc *sza,
                               size_t sofar, int indent)
{
	for (int i = 0; i < indent; i++)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar, "    ");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
	                  kmemstat_fmt,
	                  KMEMSTAT_NAME - indent * 4, arena->name,
	                  'A',
	                  KMEMSTAT_OBJSIZE, arena->quantum,
	                  KMEMSTAT_TOTAL, arena->amt_total_segs,
	                  KMEMSTAT_ALLOCED, arena->amt_alloc_segs,
	                  KMEMSTAT_NR_ALLOCS, arena->nr_allocs_ever);
	return sofar;
}
291
/* Emits one kmemstat row for a kmem_cache ('S' type), indented 'indent'
 * levels.  Returns the new sofar.
 *
 * Total / Alloc columns are in bytes: obj_size times the object counts.
 * Slab object counts are gathered under cache_lock; the per-cpu alloc
 * counters are read without locks and may be slightly stale. */
static size_t fetch_slab_line(struct kmem_cache *kc, struct sized_alloc *sza,
                              size_t sofar, int indent)
{
	struct kmem_pcpu_cache *pcc;
	struct kmem_slab *s_i;
	size_t nr_unalloc_objs = 0;
	size_t nr_allocs_ever = 0;

	spin_lock_irqsave(&kc->cache_lock);
	/* Empty slabs: all objects free.  Partial: total minus busy. */
	TAILQ_FOREACH(s_i, &kc->empty_slab_list, link)
		nr_unalloc_objs += s_i->num_total_obj;
	TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
		nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
	nr_allocs_ever = kc->nr_direct_allocs_ever;
	spin_unlock_irqsave(&kc->cache_lock);
	/* Lockless peek at the pcpu state */
	for (int i = 0; i < kmc_nr_pcpu_caches(); i++) {
		pcc = &kc->pcpu_caches[i];
		nr_allocs_ever += pcc->nr_allocs_ever;
	}

	for (int i = 0; i < indent; i++)
		sofar += snprintf(sza->buf + sofar, sza->size - sofar, "    ");
	sofar += snprintf(sza->buf + sofar, sza->size - sofar,
	                  kmemstat_fmt,
	                  KMEMSTAT_NAME - indent * 4, kc->name,
	                  'S',
	                  KMEMSTAT_OBJSIZE, kc->obj_size,
	                  KMEMSTAT_TOTAL, kc->obj_size * (nr_unalloc_objs +
	                                                  kc->nr_cur_alloc),
	                  KMEMSTAT_ALLOCED, kc->obj_size * kc->nr_cur_alloc,
	                  KMEMSTAT_NR_ALLOCS, nr_allocs_ever);
	return sofar;
}
326
327 static size_t fetch_arena_and_kids(struct arena *arena, struct sized_alloc *sza,
328                                    size_t sofar, int indent)
329 {
330         struct arena *a_i;
331         struct kmem_cache *kc_i;
332
333         sofar = fetch_arena_line(arena, sza, sofar, indent);
334         TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link)
335                 sofar = fetch_arena_and_kids(a_i, sza, sofar, indent + 1);
336         TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
337                 sofar = fetch_slab_line(kc_i, sza, sofar, indent + 1);
338         return sofar;
339 }
340
341 static struct sized_alloc *build_kmemstat(void)
342 {
343         struct arena *a_i;
344         struct kmem_cache *kc_i;
345         struct sized_alloc *sza;
346         size_t sofar = 0;
347         size_t alloc_amt = 100;
348
349         qlock(&arenas_and_slabs_lock);
350         TAILQ_FOREACH(a_i, &all_arenas, next)
351                 alloc_amt += 100;
352         TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
353                 alloc_amt += 100;
354         sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
355         sofar += snprintf(sza->buf + sofar, sza->size - sofar,
356                           kmemstat_hdr_fmt,
357                                           KMEMSTAT_NAME, "Arena/Slab Name",
358                                           KMEMSTAT_OBJSIZE, "Objsize",
359                                           KMEMSTAT_TOTAL, "Total Amt",
360                                           KMEMSTAT_ALLOCED, "Alloc Amt",
361                                           KMEMSTAT_NR_ALLOCS, "Allocs Ever");
362         for (int i = 0; i < KMEMSTAT_LINE_LN; i++)
363                 sofar += snprintf(sza->buf + sofar, sza->size - sofar, "-");
364         sofar += snprintf(sza->buf + sofar, sza->size - sofar, "\n");
365         TAILQ_FOREACH(a_i, &all_arenas, next) {
366                 if (a_i->source)
367                         continue;
368                 sofar = fetch_arena_and_kids(a_i, sza, sofar, 0);
369         }
370         qunlock(&arenas_and_slabs_lock);
371         return sza;
372 }
373
374 static struct chan *mem_open(struct chan *c, int omode)
375 {
376         if (c->qid.type & QTDIR) {
377                 if (openmode(omode) != O_READ)
378                         error(EPERM, "Tried opening directory not read-only");
379         }
380         switch (c->qid.path) {
381         case Qarena_stats:
382                 c->synth_buf = build_arena_stats();
383                 break;
384         case Qslab_stats:
385                 c->synth_buf = build_slab_stats();
386                 break;
387         case Qfree:
388                 c->synth_buf = build_free();
389                 break;
390         case Qkmemstat:
391                 c->synth_buf = build_kmemstat();
392                 break;
393         }
394         c->mode = openmode(omode);
395         c->flag |= COPEN;
396         c->offset = 0;
397         return c;
398 }
399
400 static void mem_close(struct chan *c)
401 {
402         if (!(c->flag & COPEN))
403                 return;
404         switch (c->qid.path) {
405         case Qarena_stats:
406         case Qslab_stats:
407         case Qfree:
408         case Qkmemstat:
409                 kfree(c->synth_buf);
410                 break;
411         }
412 }
413
414 static long mem_read(struct chan *c, void *ubuf, long n, int64_t offset)
415 {
416         struct sized_alloc *sza;
417
418         switch (c->qid.path) {
419         case Qdir:
420                 return devdirread(c, ubuf, n, mem_dir, ARRAY_SIZE(mem_dir),
421                                                   devgen);
422         case Qarena_stats:
423         case Qslab_stats:
424         case Qfree:
425         case Qkmemstat:
426                 sza = c->synth_buf;
427                 return readmem(offset, ubuf, n, sza->buf, sza->size);
428         default:
429                 panic("Bad Qid %p!", c->qid.path);
430         }
431         return -1;
432 }
433
434 static long mem_write(struct chan *c, void *ubuf, long n, int64_t offset)
435 {
436         switch (c->qid.path) {
437         default:
438                 error(EFAIL, "Unable to write to %s", devname());
439         }
440         return n;
441 }
442
/* Device table entry for #mem.  Only attach/walk/stat/open/close/read/write
 * are specialized above; everything else uses the generic dev* defaults. */
struct dev mem_devtab __devtab = {
	.name = "mem",
	.reset = devreset,
	.init = devinit,
	.shutdown = devshutdown,
	.attach = mem_attach,
	.walk = mem_walk,
	.stat = mem_stat,
	.open = mem_open,
	.create = devcreate,
	.close = mem_close,
	.read = mem_read,
	.bread = devbread,
	.write = mem_write,
	.bwrite = devbwrite,
	.remove = devremove,
	.wstat = devwstat,
	.power = devpower,
	.chaninfo = devchaninfo,
};