kern/drivers/dev/mem.c
/* Copyright (c) 2016 Google Inc
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * #mem, memory diagnostics (arenas and slabs)
 */

#include <ns.h>
#include <kmalloc.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <error.h>
#include <syscall.h>
#include <sys/queue.h>

struct dev mem_devtab;

static char *devname(void)
{
        return mem_devtab.name;
}

enum {
        Qdir,
        Qarena_stats,
        Qslab_stats,
        Qfree,
        Qkmemstat,
        Qslab_trace,
};

static struct dirtab mem_dir[] = {
        {".", {Qdir, 0, QTDIR}, 0, DMDIR | 0555},
        {"arena_stats", {Qarena_stats, 0, QTFILE}, 0, 0444},
        {"slab_stats", {Qslab_stats, 0, QTFILE}, 0, 0444},
        {"free", {Qfree, 0, QTFILE}, 0, 0444},
        {"kmemstat", {Qkmemstat, 0, QTFILE}, 0, 0444},
        {"slab_trace", {Qslab_trace, 0, QTFILE}, 0, 0444},
};

/* Protected by the arenas_and_slabs_lock */
static struct sized_alloc *slab_trace_data;

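/* The attach, walk, and stat hooks just defer to the generic devattach,
 * devwalk, and devstat helpers, driven by mem_dir. */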
static struct chan *mem_attach(char *spec)
{
        return devattach(devname(), spec);
}

static struct walkqid *mem_walk(struct chan *c, struct chan *nc, char **name,
                                unsigned int nname)
{
        return devwalk(c, nc, name, nname, mem_dir, ARRAY_SIZE(mem_dir),
                       devgen);
}

static size_t mem_stat(struct chan *c, uint8_t *db, size_t n)
{
        return devstat(c, db, n, mem_dir, ARRAY_SIZE(mem_dir), devgen);
}

/* Prints arena's stats to the sza, adjusting the sza's sofar. */
static void fetch_arena_stats(struct arena *arena, struct sized_alloc *sza)
{
        struct btag *bt_i;
        struct rb_node *rb_i;
        struct arena *a_i;
        struct kmem_cache *kc_i;

        size_t nr_allocs = 0;
        size_t nr_imports = 0;
        size_t amt_alloc = 0;
        size_t amt_free = 0;
        size_t amt_imported = 0;
        size_t empty_hash_chain = 0;
        size_t longest_hash_chain = 0;

        sza_printf(sza, "Arena: %s (%p)\n--------------\n", arena->name, arena);
        sza_printf(sza, "\tquantum: %d, qcache_max: %d\n", arena->quantum,
                   arena->qcache_max);
        sza_printf(sza, "\tsource: %s\n",
                   arena->source ? arena->source->name : "none");
        spin_lock_irqsave(&arena->lock);
        for (int i = 0; i < ARENA_NR_FREE_LISTS; i++) {
                int j = 0;

                if (!BSD_LIST_EMPTY(&arena->free_segs[i])) {
                        sza_printf(sza, "\tList of [2^%d - 2^%d):\n", i, i + 1);
                        BSD_LIST_FOREACH(bt_i, &arena->free_segs[i], misc_link)
                                j++;
                        sza_printf(sza, "\t\tNr free segs: %d\n", j);
                }
        }
        for (int i = 0; i < arena->hh.nr_hash_lists; i++) {
                int j = 0;

                if (BSD_LIST_EMPTY(&arena->alloc_hash[i]))
                        empty_hash_chain++;
                BSD_LIST_FOREACH(bt_i, &arena->alloc_hash[i], misc_link)
                        j++;
                longest_hash_chain = MAX(longest_hash_chain, j);
        }
        sza_printf(sza, "\tSegments:\n\t--------------\n");
        for (rb_i = rb_first(&arena->all_segs); rb_i; rb_i = rb_next(rb_i)) {
                bt_i = container_of(rb_i, struct btag, all_link);
                if (bt_i->status == BTAG_SPAN) {
                        nr_imports++;
                        amt_imported += bt_i->size;
                }
                if (bt_i->status == BTAG_FREE)
                        amt_free += bt_i->size;
                if (bt_i->status == BTAG_ALLOC) {
                        nr_allocs++;
                        amt_alloc += bt_i->size;
                }
        }
        sza_printf(sza, "\tStats:\n\t-----------------\n");
        sza_printf(sza, "\t\tAmt free: %llu (%p)\n", amt_free, amt_free);
        sza_printf(sza, "\t\tAmt alloc: %llu (%p), nr allocs %d\n",
                   amt_alloc, amt_alloc, nr_allocs);
        sza_printf(sza, "\t\tAmt total segs: %llu, amt alloc segs %llu\n",
                   arena->amt_total_segs, arena->amt_alloc_segs);
        sza_printf(sza, "\t\tAmt imported: %llu (%p), nr imports %d\n",
                   amt_imported, amt_imported, nr_imports);
        sza_printf(sza, "\t\tNr hash %d, empty hash: %d, longest hash %d\n",
                   arena->hh.nr_hash_lists, empty_hash_chain,
                   longest_hash_chain);
        spin_unlock_irqsave(&arena->lock);
        sza_printf(sza, "\tImporting Arenas:\n\t-----------------\n");
        TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link)
                sza_printf(sza, "\t\t%s\n", a_i->name);
        sza_printf(sza, "\tImporting Slabs:\n\t-----------------\n");
        TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
                sza_printf(sza, "\t\t%s\n", kc_i->name);
}

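/* Snapshots every arena's stats into a single sized_alloc, built under the
 * arenas_and_slabs_lock. */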
static struct sized_alloc *build_arena_stats(void)
{
        struct sized_alloc *sza;
        size_t alloc_amt = 0;
        struct arena *a_i;

        qlock(&arenas_and_slabs_lock);
        /* Rough guess about how many chars per arena we'll need. */
        TAILQ_FOREACH(a_i, &all_arenas, next)
                alloc_amt += 1000;
        sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
        TAILQ_FOREACH(a_i, &all_arenas, next)
                fetch_arena_stats(a_i, sza);
        qunlock(&arenas_and_slabs_lock);
        return sza;
}

/* Prints the kmem cache's stats to the sza, updating its sofar. */
static void fetch_slab_stats(struct kmem_cache *kc, struct sized_alloc *sza)
{
        struct kmem_slab *s_i;
        struct kmem_bufctl *bc_i;

        size_t nr_unalloc_objs = 0;
        size_t empty_hash_chain = 0;
        size_t longest_hash_chain = 0;

        spin_lock_irqsave(&kc->cache_lock);
        sza_printf(sza, "\nKmem_cache: %s\n---------------------\n", kc->name);
        sza_printf(sza, "Source: %s\n", kc->source->name);
        sza_printf(sza, "Objsize (incl align): %d\n", kc->obj_size);
        sza_printf(sza, "Align: %d\n", kc->align);
        TAILQ_FOREACH(s_i, &kc->empty_slab_list, link) {
                assert(!s_i->num_busy_obj);
                nr_unalloc_objs += s_i->num_total_obj;
        }
        TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
                nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
        sza_printf(sza, "Nr unallocated in slab layer: %lu\n", nr_unalloc_objs);
        sza_printf(sza, "Nr allocated from slab layer: %d\n", kc->nr_cur_alloc);
        for (int i = 0; i < kc->hh.nr_hash_lists; i++) {
                int j = 0;

                if (SLIST_EMPTY(&kc->alloc_hash[i]))
                        empty_hash_chain++;
                SLIST_FOREACH(bc_i, &kc->alloc_hash[i], link)
                        j++;
                longest_hash_chain = MAX(longest_hash_chain, j);
        }
        sza_printf(sza,
                   "Nr hash %d, empty hash: %d, longest hash %d, loadlim %d\n",
                   kc->hh.nr_hash_lists, empty_hash_chain,
                   longest_hash_chain, kc->hh.load_limit);
        spin_unlock_irqsave(&kc->cache_lock);
        spin_lock_irqsave(&kc->depot.lock);
        sza_printf(sza, "Depot magsize: %d\n", kc->depot.magsize);
        sza_printf(sza, "Nr empty mags: %d\n", kc->depot.nr_empty);
        sza_printf(sza, "Nr non-empty mags: %d\n", kc->depot.nr_not_empty);
        spin_unlock_irqsave(&kc->depot.lock);
}

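/* Snapshots every kmem cache's stats into a single sized_alloc, built under
 * the arenas_and_slabs_lock. */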
static struct sized_alloc *build_slab_stats(void)
{
        struct sized_alloc *sza;
        size_t alloc_amt = 0;
        struct kmem_cache *kc_i;

        qlock(&arenas_and_slabs_lock);
        TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
                alloc_amt += 500;
        sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
        TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
                fetch_slab_stats(kc_i, sza);
        qunlock(&arenas_and_slabs_lock);
        return sza;
}

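/* Sums the total and allocated segment amounts of the base arenas to report
 * total, used, and free memory. */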
static struct sized_alloc *build_free(void)
{
        struct arena *a_i;
        struct sized_alloc *sza;
        size_t amt_total = 0;
        size_t amt_alloc = 0;

        sza = sized_kzmalloc(500, MEM_WAIT);
        qlock(&arenas_and_slabs_lock);
        TAILQ_FOREACH(a_i, &all_arenas, next) {
                if (!a_i->is_base)
                        continue;
                amt_total += a_i->amt_total_segs;
                amt_alloc += a_i->amt_alloc_segs;
        }
        qunlock(&arenas_and_slabs_lock);
        sza_printf(sza, "Total Memory : %15llu\n", amt_total);
        sza_printf(sza, "Used Memory  : %15llu\n", amt_alloc);
        sza_printf(sza, "Free Memory  : %15llu\n", amt_total - amt_alloc);
        return sza;
}

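/* Column widths for the kmemstat table.  The extra 8 in KMEMSTAT_LINE_LN
 * covers the ": X :" type column plus the colons between the numeric
 * fields. */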
#define KMEMSTAT_NAME                   30
#define KMEMSTAT_OBJSIZE                8
#define KMEMSTAT_TOTAL                  15
#define KMEMSTAT_ALLOCED                15
#define KMEMSTAT_NR_ALLOCS              12
#define KMEMSTAT_LINE_LN (8 + KMEMSTAT_NAME + KMEMSTAT_OBJSIZE + KMEMSTAT_TOTAL\
                          + KMEMSTAT_ALLOCED + KMEMSTAT_NR_ALLOCS)

const char kmemstat_fmt[]     = "%-*s: %c :%*llu:%*llu:%*llu:%*llu\n";
const char kmemstat_hdr_fmt[] = "%-*s:Typ:%*s:%*s:%*s:%*s\n";

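/* Emits one table row for an arena (type 'A'), indented four spaces per
 * level of the import hierarchy. */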
static void fetch_arena_line(struct arena *arena, struct sized_alloc *sza,
                             int indent)
{
        for (int i = 0; i < indent; i++)
                sza_printf(sza, "    ");
        sza_printf(sza, kmemstat_fmt,
                   KMEMSTAT_NAME - indent * 4, arena->name,
                   'A',
                   KMEMSTAT_OBJSIZE, arena->quantum,
                   KMEMSTAT_TOTAL, arena->amt_total_segs,
                   KMEMSTAT_ALLOCED, arena->amt_alloc_segs,
                   KMEMSTAT_NR_ALLOCS, arena->nr_allocs_ever);
}

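/* Emits one table row for a kmem cache (type 'S').  The allocation counts
 * combine the slab layer, read under cache_lock, with a lockless peek at the
 * per-cpu caches. */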
static void fetch_slab_line(struct kmem_cache *kc, struct sized_alloc *sza,
                            int indent)
{
        struct kmem_pcpu_cache *pcc;
        struct kmem_slab *s_i;
        size_t nr_unalloc_objs = 0;
        size_t nr_allocs_ever = 0;

        spin_lock_irqsave(&kc->cache_lock);
        TAILQ_FOREACH(s_i, &kc->empty_slab_list, link)
                nr_unalloc_objs += s_i->num_total_obj;
        TAILQ_FOREACH(s_i, &kc->partial_slab_list, link)
                nr_unalloc_objs += s_i->num_total_obj - s_i->num_busy_obj;
        nr_allocs_ever = kc->nr_direct_allocs_ever;
        spin_unlock_irqsave(&kc->cache_lock);
        /* Lockless peek at the pcpu state */
        for (int i = 0; i < kmc_nr_pcpu_caches(); i++) {
                pcc = &kc->pcpu_caches[i];
                nr_allocs_ever += pcc->nr_allocs_ever;
        }

        for (int i = 0; i < indent; i++)
                sza_printf(sza, "    ");
        sza_printf(sza, kmemstat_fmt,
                   KMEMSTAT_NAME - indent * 4, kc->name,
                   'S',
                   KMEMSTAT_OBJSIZE, kc->obj_size,
                   KMEMSTAT_TOTAL, kc->obj_size * (nr_unalloc_objs +
                                                   kc->nr_cur_alloc),
                   KMEMSTAT_ALLOCED, kc->obj_size * kc->nr_cur_alloc,
                   KMEMSTAT_NR_ALLOCS, nr_allocs_ever);
}

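/* Prints an arena's row, then recurses into every arena that imports from it
 * and prints each importing slab, one indent level deeper. */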
static void fetch_arena_and_kids(struct arena *arena, struct sized_alloc *sza,
                                 int indent)
{
        struct arena *a_i;
        struct kmem_cache *kc_i;

        fetch_arena_line(arena, sza, indent);
        TAILQ_FOREACH(a_i, &arena->__importing_arenas, import_link) {
                if (a_i == arena)
                        continue;
                fetch_arena_and_kids(a_i, sza, indent + 1);
        }
        TAILQ_FOREACH(kc_i, &arena->__importing_slabs, import_link)
                fetch_slab_line(kc_i, sza, indent + 1);
}

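/* Builds the full kmemstat table: a header, a separator line, then one
 * subtree per root arena (an arena with no source, or one that sources
 * itself). */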
static struct sized_alloc *build_kmemstat(void)
{
        struct arena *a_i;
        struct kmem_cache *kc_i;
        struct sized_alloc *sza;
        size_t alloc_amt = 100;

        qlock(&arenas_and_slabs_lock);
        TAILQ_FOREACH(a_i, &all_arenas, next)
                alloc_amt += 100;
        TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link)
                alloc_amt += 100;
        sza = sized_kzmalloc(alloc_amt, MEM_WAIT);
        sza_printf(sza, kmemstat_hdr_fmt,
                   KMEMSTAT_NAME, "Arena/Slab Name",
                   KMEMSTAT_OBJSIZE, "Objsize",
                   KMEMSTAT_TOTAL, "Total Amt",
                   KMEMSTAT_ALLOCED, "Alloc Amt",
                   KMEMSTAT_NR_ALLOCS, "Allocs Ever");
        for (int i = 0; i < KMEMSTAT_LINE_LN; i++)
                sza_printf(sza, "-");
        sza_printf(sza, "\n");
        TAILQ_FOREACH(a_i, &all_arenas, next) {
                if (a_i->source && a_i->source != a_i)
                        continue;
                fetch_arena_and_kids(a_i, sza, 0);
        }
        qunlock(&arenas_and_slabs_lock);
        return sza;
}

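/* Dumps the kmemstat table straight to the console. */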
void kmemstat(void)
{
        struct sized_alloc *sza = build_kmemstat();

        printk("%s", sza->buf);
        kfree(sza);
}

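/* The read-only stats files are synthesized at open time: the whole report is
 * built into c->synth_buf, and reads just copy out of it.  Qslab_trace is the
 * exception; its reads are served from the global slab_trace_data. */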
static struct chan *mem_open(struct chan *c, int omode)
{
        if (c->qid.type & QTDIR) {
                if (openmode(omode) != O_READ)
                        error(EPERM, "Tried opening directory not read-only");
        }
        switch (c->qid.path) {
        case Qarena_stats:
                c->synth_buf = build_arena_stats();
                break;
        case Qslab_stats:
                c->synth_buf = build_slab_stats();
                break;
        case Qfree:
                c->synth_buf = build_free();
                break;
        case Qkmemstat:
                c->synth_buf = build_kmemstat();
                break;
        }
        c->mode = openmode(omode);
        c->flag |= COPEN;
        c->offset = 0;
        return c;
}

static void mem_close(struct chan *c)
{
        if (!(c->flag & COPEN))
                return;
        switch (c->qid.path) {
        case Qarena_stats:
        case Qslab_stats:
        case Qfree:
        case Qkmemstat:
                kfree(c->synth_buf);
                c->synth_buf = NULL;
                break;
        }
}

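/* Serves reads of #mem/slab_trace from slab_trace_data, which is shared by
 * all chans and protected by the arenas_and_slabs_lock. */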
static size_t slab_trace_read(struct chan *c, void *ubuf, size_t n,
                              off64_t offset)
{
        size_t ret = 0;

        qlock(&arenas_and_slabs_lock);
        if (slab_trace_data)
                ret = readstr(offset, ubuf, n, slab_trace_data->buf);
        qunlock(&arenas_and_slabs_lock);
        return ret;
}

static size_t mem_read(struct chan *c, void *ubuf, size_t n, off64_t offset)
{
        struct sized_alloc *sza;

        switch (c->qid.path) {
        case Qdir:
                return devdirread(c, ubuf, n, mem_dir, ARRAY_SIZE(mem_dir),
                                  devgen);
        case Qarena_stats:
        case Qslab_stats:
        case Qfree:
        case Qkmemstat:
                sza = c->synth_buf;
                return readstr(offset, ubuf, n, sza->buf);
        case Qslab_trace:
                return slab_trace_read(c, ubuf, n, offset);
        default:
                panic("Bad Qid %p!", c->qid.path);
        }
        return -1;
}

/* start, then stop, then print, then read to get the trace */
#define SLAB_TRACE_USAGE "start|stop|print|reset SLAB_NAME"

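/* Handles "start|stop|print|reset SLAB_NAME" on the named kmem cache.  Only
 * "print" installs a report in slab_trace_data; the other commands clear it.
 * The previous report is freed once the qlock is dropped. */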
static void slab_trace_cmd(struct chan *c, struct cmdbuf *cb)
{
        ERRSTACK(1);
        struct sized_alloc *sza, *old_sza;
        struct kmem_cache *kc = NULL, *kc_i;

        if (cb->nf < 2)
                error(EFAIL, SLAB_TRACE_USAGE);

        qlock(&arenas_and_slabs_lock);
        if (waserror()) {
                qunlock(&arenas_and_slabs_lock);
                nexterror();
        }
        TAILQ_FOREACH(kc_i, &all_kmem_caches, all_kmc_link) {
                if (!strcmp(kc_i->name, cb->f[1])) {
                        if (kc) {
                                printk("[kernel] Multiple KC's named %s, tracing the first one\n",
                                       kc->name);
                                break;
                        }
                        kc = kc_i;
                }
        }
        if (!kc)
                error(ENOENT, "No such slab %s", cb->f[1]);
        /* Note that the only time we have a real sza is when printing.
         * Otherwise, it's NULL.  We still want that NULL installed as
         * slab_trace_data, since readers should get nothing back until they
         * ask for a print. */
        sza = NULL;
        if (!strcmp(cb->f[0], "start")) {
                if (kmem_trace_start(kc))
                        error(EFAIL, "Unable to trace slab %s", kc->name);
        } else if (!strcmp(cb->f[0], "stop")) {
                kmem_trace_stop(kc);
        } else if (!strcmp(cb->f[0], "print")) {
                sza = kmem_trace_print(kc);
        } else if (!strcmp(cb->f[0], "reset")) {
                kmem_trace_reset(kc);
        } else {
                error(EFAIL, SLAB_TRACE_USAGE);
        }
        old_sza = slab_trace_data;
        slab_trace_data = sza;
        qunlock(&arenas_and_slabs_lock);
        poperror();
        kfree(old_sza);
}

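/* Writes are only meaningful for #mem/slab_trace; the other files are
 * read-only. */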
static size_t mem_write(struct chan *c, void *ubuf, size_t n, off64_t unused)
{
        ERRSTACK(1);
        struct cmdbuf *cb = parsecmd(ubuf, n);

        if (waserror()) {
                kfree(cb);
                nexterror();
        }
        switch (c->qid.path) {
        case Qslab_trace:
                slab_trace_cmd(c, cb);
                break;
        default:
                error(EFAIL, "Unable to write to %s", devname());
        }
        kfree(cb);
        poperror();
        return n;
}

struct dev mem_devtab __devtab = {
        .name = "mem",
        .reset = devreset,
        .init = devinit,
        .shutdown = devshutdown,
        .attach = mem_attach,
        .walk = mem_walk,
        .stat = mem_stat,
        .open = mem_open,
        .create = devcreate,
        .close = mem_close,
        .read = mem_read,
        .bread = devbread,
        .write = mem_write,
        .bwrite = devbwrite,
        .remove = devremove,
        .wstat = devwstat,
        .power = devpower,
        .chaninfo = devchaninfo,
};