9ns: Fix devtab function pointer signatures
[akaros.git] / kern / drivers / dev / kprof.c
index 042d86e..cbc7b1c 100644 (file)
  * in the LICENSE file.
  */
 
-// get_fn_name is slowing down the kprocread 
-//     have an array of translated fns
-//     or a "next" iterator, since we're walking in order
-//
-// irqsave locks
-//
-// kprof struct should be a ptr, have them per core
-//             we'll probably need to track the length still, so userspace knows how
-//             big it is
-//
-//             will also want more files in the kprof dir for each cpu or something
-//             
-// maybe don't use slot 0 and 1 as total and 'not kernel' ticks
-//
-// fix the failed assert XXX
-
+#include <ros/profiler_records.h>
+#include <arch/time.h>
 #include <vfs.h>
-#include <kfs.h>
 #include <slab.h>
 #include <kmalloc.h>
 #include <kref.h>
+#include <atomic.h>
+#include <kthread.h>
 #include <string.h>
 #include <stdio.h>
 #include <assert.h>
 #include <error.h>
-#include <cpio.h>
 #include <pmap.h>
 #include <smp.h>
-#include <ip.h>
-#include <oprofile.h>
-
-#define LRES   3               /* log of PC resolution */
-#define CELLSIZE       8       /* sizeof of count cell */
+#include <time.h>
+#include <circular_buffer.h>
+#include <umem.h>
+#include <profiler.h>
+#include <kprof.h>
+#include <ros/procinfo.h>
+#include <init.h>
+
+#define KTRACE_BUFFER_SIZE (128 * 1024)
+#define TRACE_PRINTK_BUFFER_SIZE (8 * 1024)
+
+enum {
+       Kprofdirqid = 0,
+       Kprofdataqid,
+       Kprofctlqid,
+       Kptracectlqid,
+       Kptraceqid,
+       Kprintxqid,
+       Kmpstatqid,
+       Kmpstatrawqid,
+};
 
-struct kprof
-{
-       uintptr_t       minpc;
-       uintptr_t       maxpc;
-       int     nbuf;
-       int     time;
-       uint64_t        *buf;   /* keep in sync with cellsize */
-       size_t          buf_sz;
-       spinlock_t lock;
+struct trace_printk_buffer {
+       atomic_t in_use;
+       char buffer[TRACE_PRINTK_BUFFER_SIZE];
 };
-struct kprof kprof;
 
-/* output format. Nice fixed size. That makes it seekable.
- * small subtle bit here. You have to convert offset FROM FORMATSIZE units
- * to CELLSIZE units in a few places.
- */
-char *outformat = "%016llx %29.29s %016llx\n";
-#define FORMATSIZE 64
-enum{
-       Kprofdirqid,
-       Kprofdataqid,
-       Kprofctlqid,
-       Kprofoprofileqid,
+struct kprof {
+       qlock_t lock;
+       bool mpstat_ipi;
+       bool profiling;
+       bool opened;
 };
-struct dirtab kproftab[]={
-       {".",           {Kprofdirqid, 0, QTDIR},0,      DMDIR|0550},
-       {"kpdata",      {Kprofdataqid},         0,      0600},
-       {"kpctl",       {Kprofctlqid},          0,      0600},
-       {"kpoprofile",  {Kprofoprofileqid},     0,      0600},
+
+struct dev kprofdevtab;
+struct dirtab kproftab[] = {
+       {".",                   {Kprofdirqid,           0, QTDIR}, 0,   DMDIR|0550},
+       {"kpdata",              {Kprofdataqid},         0,      0600},
+       {"kpctl",               {Kprofctlqid},          0,      0600},
+       {"kptrace_ctl", {Kptracectlqid},        0,      0660},
+       {"kptrace",             {Kptraceqid},           0,      0600},
+       {"kprintx",             {Kprintxqid},           0,      0600},
+       {"mpstat",              {Kmpstatqid},           0,      0600},
+       {"mpstat-raw",  {Kmpstatrawqid},        0,      0600},
 };
 
-static struct chan*
-kprofattach(char *spec)
+static struct kprof kprof;
+static bool ktrace_init_done = FALSE;
+static spinlock_t ktrace_lock = SPINLOCK_INITIALIZER_IRQSAVE;
+static struct circular_buffer ktrace_data;
+static char ktrace_buffer[KTRACE_BUFFER_SIZE];
+static char kprof_control_usage[128];
+
+static size_t mpstat_len(void)
 {
-       uint32_t n;
-
-       /* allocate when first used */
-       kprof.minpc = 0xffffffffc2000000; //KERN_LOAD_ADDR;
-       kprof.maxpc = (uintptr_t)&etext;
-       kprof.nbuf = (kprof.maxpc-kprof.minpc) >> LRES;
-       n = kprof.nbuf*CELLSIZE;
-       if(kprof.buf == 0) {
-               printk("Allocate %d bytes\n", n);
-               kprof.buf = kzmalloc(n, KMALLOC_WAIT);
-               if(kprof.buf == 0)
-                       error(Enomem);
-       }
-       kproftab[1].length = kprof.nbuf * FORMATSIZE;
-       kprof.buf_sz = n;
-       /* NO, I'm not sure how we should do this yet. */
-       int alloc_cpu_buffers(void);
-       alloc_cpu_buffers();
-       return devattach('K', spec);
+       size_t each_row = 7 + NR_CPU_STATES * 26;
+
+       return each_row * (num_cores + 1) + 1;
 }
 
-static void
-kproftimer(uintptr_t pc)
+static size_t mpstatraw_len(void)
 {
-       if(kprof.time == 0)
-               return;
+       size_t header_row = 27 + NR_CPU_STATES * 7 + 1;
+       size_t cpu_row = 7 + NR_CPU_STATES * 17;
 
-       /*
-        * if the pc corresponds to the idle loop, don't consider it.
+       return header_row + cpu_row * num_cores + 1;
+}
 
-       if(m->inidle)
-               return;
-        */
-       /*
-        *  if the pc is coming out of spllo or splx,
-        *  use the pc saved when we went splhi.
+static char *devname(void)
+{
+       return kprofdevtab.name;
+}
 
-       if(pc>=PTR2UINT(spllo) && pc<=PTR2UINT(spldone))
-               pc = m->splpc;
-        */
/* Attach entry point: just hand back a channel rooted at this device. */
static struct chan *kprof_attach(char *spec)
{
	return devattach(devname(), spec);
}
 
-//     ilock(&kprof);
-       /* this is weird. What we do is assume that all the time since the last
-        * measurement went into this PC. It's the best
-        * we can do I suppose. And we are sampling at 1 ms. for now.
-        * better ideas welcome.
-        */
-       kprof.buf[0] += 1; //Total count of ticks.
-       if(kprof.minpc<=pc && pc<kprof.maxpc){
-               pc -= kprof.minpc;
-               pc >>= LRES;
-               kprof.buf[pc] += 1;
-       }else
-               kprof.buf[1] += 1; // Why?
-//     iunlock(&kprof);
+/* Start collecting samples from perf events into the profiler.
+ *
+ * This command only runs if the user successfully opened kpctl, which gives
+ * them a profiler (the global profiler, for now). */
+static void kprof_start_profiler(void)
+{
+       ERRSTACK(1);
+
+       qlock(&kprof.lock);
+       if (waserror()) {
+               qunlock(&kprof.lock);
+               nexterror();
+       }
+       if (!kprof.profiling) {
+               profiler_start();
+               kprof.profiling = TRUE;
+       }
+       poperror();
+       qunlock(&kprof.lock);
 }
 
-static void setup_timers(void)
+/* Stops collecting samples from perf events.
+ *
+ * This command only runs if the user successfully opened kpctl, which gives
+ * them a profiler (the global profiler, for now). */
+static void kprof_stop_profiler(void)
 {
-       void dummy_alarm(struct alarm_waiter *waiter)
-       {
-               struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
-               set_awaiter_rel(waiter, 1000);
-               __set_alarm(tchain, waiter);
+       ERRSTACK(1);
+
+       qlock(&kprof.lock);
+       if (waserror()) {
+               qunlock(&kprof.lock);
+               nexterror();
        }
-       void kprof_irq_handler(struct hw_trapframe *hw_tf, void *data)
-       {
-               kproftimer(x86_get_hwtf_pc(hw_tf));     /* TODO arch */
+       if (kprof.profiling) {
+               profiler_stop();
+               kprof.profiling = FALSE;
+       }
+       poperror();
+       qunlock(&kprof.lock);
+}
+
+/* Makes each core flush its results into the profiler queue.  You can do this
+ * while the profiler is still running.  However, this does not hang up the
+ * queue, so reads on kpdata will block. */
+static void kprof_flush_profiler(void)
+{
+       ERRSTACK(1);
+
+       qlock(&kprof.lock);
+       if (waserror()) {
+               qunlock(&kprof.lock);
+               nexterror();
        }
-       struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
-       struct alarm_waiter *waiter = kmalloc(sizeof(struct alarm_waiter), 0);
-       init_awaiter(waiter, dummy_alarm);
-       set_awaiter_rel(waiter, 1000);
-       /* with this style, the timer IRQ goes off and runs us, but doesn't get
-        * reprogrammed til the next alarm. TODO: irq/rkm alarms. */
-       register_irq(IdtLAPIC_TIMER, kprof_irq_handler, NULL,
-                    MKBUS(BusLAPIC, 0, 0, 0));
-       set_alarm(tchain, waiter);
+       if (kprof.profiling)
+               profiler_trace_data_flush();
+       poperror();
+       qunlock(&kprof.lock);
+}
+
+static void kprof_init(void)
+{
+       profiler_init();
+
+       qlock_init(&kprof.lock);
+       kprof.profiling = FALSE;
+       kprof.opened = FALSE;
+
+       for (int i = 0; i < ARRAY_SIZE(kproftab); i++)
+               kproftab[i].length = 0;
+
+       kprof.mpstat_ipi = TRUE;
+       kproftab[Kmpstatqid].length = mpstat_len();
+       kproftab[Kmpstatrawqid].length = mpstatraw_len();
+
+       strlcpy(kprof_control_usage, "start|stop|flush",
+               sizeof(kprof_control_usage));
+       profiler_append_configure_usage(kprof_control_usage,
+                                       sizeof(kprof_control_usage));
 }
 
/* Nothing to tear down; profiler cleanup happens on close of kpctl. */
static void kprof_shutdown(void)
{
}
 
-static struct walkqid*
-kprofwalk(struct chan *c, struct chan *nc, char **name, int nname)
+static struct walkqid *kprof_walk(struct chan *c, struct chan *nc, char **name,
+                                  unsigned int nname)
 {
        return devwalk(c, nc, name, nname, kproftab, ARRAY_SIZE(kproftab), devgen);
 }
 
-static int
-kprofstat(struct chan *c, uint8_t *db, int n)
+static size_t kprof_profdata_size(void)
+{
+       return profiler_size();
+}
+
+static long kprof_profdata_read(void *dest, long size, int64_t off)
+{
+       return profiler_read(dest, size);
+}
+
+static size_t kprof_stat(struct chan *c, uint8_t *db, size_t n)
 {
-       /* barf. */
-       kproftab[3].length = oproflen();
+       kproftab[Kprofdataqid].length = kprof_profdata_size();
+       kproftab[Kptraceqid].length = kprof_tracedata_size();
 
        return devstat(c, db, n, kproftab, ARRAY_SIZE(kproftab), devgen);
 }
 
-static struct chan*
-kprofopen(struct chan *c, int omode)
+static struct chan *kprof_open(struct chan *c, int omode)
 {
-       if(c->qid.type & QTDIR){
-               if(omode != OREAD)
-                       error(Eperm);
+       if (c->qid.type & QTDIR) {
+               if (openmode(omode) != O_READ)
+                       error(EPERM, ERROR_FIXME);
+       }
+       switch ((int) c->qid.path) {
+       case Kprofctlqid:
+               /* We have one global profiler.  Only one FD may be opened at a time for
+                * it.  If we ever have separate profilers, we can create the profiler
+                * here, and every open would get a separate instance. */
+               qlock(&kprof.lock);
+               if (kprof.opened) {
+                       qunlock(&kprof.lock);
+                       error(EBUSY, "Global profiler is already open");
+               }
+               kprof.opened = TRUE;
+               /* TODO: have a real creation function for a non-global profiler */
+               profiler_setup();
+               qunlock(&kprof.lock);
+               break;
        }
        c->mode = openmode(omode);
        c->flag |= COPEN;
@@ -190,85 +236,114 @@ kprofopen(struct chan *c, int omode)
        return c;
 }
 
-static void
-kprofclose(struct chan*unused)
+static void kprof_close(struct chan *c)
+{
+       if (c->flag & COPEN) {
+               switch ((int) c->qid.path) {
+               case Kprofctlqid:
+                       kprof_stop_profiler();
+                       qlock(&kprof.lock);
+                       profiler_cleanup();
+                       kprof.opened = FALSE;
+                       qunlock(&kprof.lock);
+                       break;
+               }
+       }
+}
+
+static long mpstat_read(void *va, long n, int64_t off)
+{
+       size_t bufsz = mpstat_len();
+       char *buf = kmalloc(bufsz, MEM_WAIT);
+       int len = 0;
+       struct per_cpu_info *pcpui;
+       uint64_t cpu_total;
+       struct timespec ts;
+
+       /* the IPI interferes with other cores, might want to disable that. */
+       if (kprof.mpstat_ipi)
+               send_broadcast_ipi(I_POKE_CORE);
+
+       len += snprintf(buf + len, bufsz - len, "  CPU: ");
+       for (int j = 0; j < NR_CPU_STATES; j++)
+               len += snprintf(buf + len, bufsz - len, "%23s%s", cpu_state_names[j],
+                               j != NR_CPU_STATES - 1 ? "   " : "  \n");
+
+       for (int i = 0; i < num_cores; i++) {
+               pcpui = &per_cpu_info[i];
+               cpu_total = 0;
+               len += snprintf(buf + len, bufsz - len, "%5d: ", i);
+               for (int j = 0; j < NR_CPU_STATES; j++)
+                       cpu_total += pcpui->state_ticks[j];
+               cpu_total = MAX(cpu_total, 1);  /* for the divide later */
+               for (int j = 0; j < NR_CPU_STATES; j++) {
+                       ts = tsc2timespec(pcpui->state_ticks[j]);
+                       len += snprintf(buf + len, bufsz - len, "%10d.%06d (%3d%%)%s",
+                                       ts.tv_sec, ts.tv_nsec / 1000,
+                                       MIN((pcpui->state_ticks[j] * 100) / cpu_total, 100),
+                                       j != NR_CPU_STATES - 1 ? ", " : " \n");
+               }
+       }
+       n = readstr(off, va, n, buf);
+       kfree(buf);
+       return n;
+}
+
+static long mpstatraw_read(void *va, long n, int64_t off)
 {
+       size_t bufsz = mpstatraw_len();
+       char *buf = kmalloc(bufsz, MEM_WAIT);
+       int len = 0;
+       struct per_cpu_info *pcpui;
+
+       /* could spit it all out in binary, though then it'd be harder to process
+        * the data across a mnt (if we export #K).  probably not a big deal. */
+
+       /* header line: version, num_cores, tsc freq, state names */
+       len += snprintf(buf + len, bufsz - len, "v%03d %5d %16llu", 1, num_cores,
+                       __proc_global_info.tsc_freq);
+       for (int j = 0; j < NR_CPU_STATES; j++)
+               len += snprintf(buf + len, bufsz - len, " %6s", cpu_state_names[j]);
+       len += snprintf(buf + len, bufsz - len, "\n");
+
+       for (int i = 0; i < num_cores; i++) {
+               pcpui = &per_cpu_info[i];
+               len += snprintf(buf + len, bufsz - len, "%5d: ", i);
+               for (int j = 0; j < NR_CPU_STATES; j++) {
+                       len += snprintf(buf + len, bufsz - len, "%16llx%s",
+                                       pcpui->state_ticks[j],
+                                       j != NR_CPU_STATES - 1 ? " " : "\n");
+               }
+       }
+       n = readstr(off, va, n, buf);
+       kfree(buf);
+       return n;
 }
 
-static long
-kprofread(struct chan *c, void *va, long n, int64_t off)
+static size_t kprof_read(struct chan *c, void *va, size_t n, off64_t off)
 {
        uint64_t w, *bp;
        char *a, *ea;
        uintptr_t offset = off;
        uint64_t pc;
-       int snp_ret, ret = 0;
 
-       switch((int)c->qid.path){
+       switch ((int) c->qid.path) {
        case Kprofdirqid:
                return devdirread(c, va, n, kproftab, ARRAY_SIZE(kproftab), devgen);
-
        case Kprofdataqid:
-
-               if (n < FORMATSIZE){
-                       n = 0;
-                       break;
-               }
-               a = va;
-               ea = a + n;
-
-               /* we check offset later before deref bp.  offset / FORMATSIZE is how
-                * many entries we're skipping/offsetting. */
-               bp = kprof.buf + offset/FORMATSIZE;
-               pc = kprof.minpc + ((offset/FORMATSIZE)<<LRES);
-               while((a < ea) && (n >= FORMATSIZE)){
-                       /* what a pain. We need to manage the
-                        * fact that the *prints all make room for
-                        * \0
-                        */
-                       char print[FORMATSIZE+1];
-                       char *name;
-                       int amt_read;
-
-                       if (pc >= kprof.maxpc)
-                               break;
-                       /* pc is also our exit for bp.  should be in lockstep */
-                       // XXX this assert fails, fix it!
-                       //assert(bp < kprof.buf + kprof.nbuf);
-                       /* do not attempt to filter these results based on w < threshold.
-                        * earlier, we computed bp/pc based on assuming a full-sized file,
-                        * and skipping entries will result in read() calls thinking they
-                        * received earlier entries when they really received later ones.
-                        * imagine a case where there are 1000 skipped items, and read()
-                        * asks for chunks of 32.  it'll get chunks of the next 32 valid
-                        * items, over and over (1000/32 times). */
-                       w = *bp++;
-
-                       if (pc == kprof.minpc)
-                               name = "Total";
-                       else if (pc == kprof.minpc + 8)
-                               name = "User";
-                       else
-                               name = get_fn_name(pc);
-
-                       snp_ret = snprintf(print, sizeof(print), outformat, pc, name, w);
-                       assert(snp_ret == FORMATSIZE);
-                       if ((pc != kprof.minpc) && (pc != kprof.minpc + 8))
-                               kfree(name);
-
-                       amt_read = readmem(offset % FORMATSIZE, a, n, print, FORMATSIZE);
-                       offset = 0;     /* future loops have no offset */
-
-                       a += amt_read;
-                       n -= amt_read;
-                       ret += amt_read;
-
-                       pc += (1 << LRES);
-               }
-               n = ret;
+               n = kprof_profdata_read(va, n, off);
+               break;
+       case Kptraceqid:
+               n = kprof_tracedata_read(va, n, off);
+               break;
+       case Kprintxqid:
+               n = readstr(offset, va, n, printx_on ? "on" : "off");
+               break;
+       case Kmpstatqid:
+               n = mpstat_read(va, n, offset);
                break;
-       case Kprofoprofileqid:
-               n = oprofread(va,n);
+       case Kmpstatrawqid:
+               n = mpstatraw_read(va, n, offset);
                break;
        default:
                n = 0;
@@ -277,69 +352,245 @@ kprofread(struct chan *c, void *va, long n, int64_t off)
        return n;
 }
 
-static void kprof_clear(struct kprof *kp)
+static size_t kprof_write(struct chan *c, void *a, size_t n, off64_t unused)
 {
-       spin_lock(&kp->lock);
-       memset(kp->buf, 0, kp->buf_sz);
-       spin_unlock(&kp->lock);
-}
+       ERRSTACK(1);
+       struct cmdbuf *cb = parsecmd(a, n);
 
-static long
-kprofwrite(struct chan *c, void *a, long n, int64_t unused)
-{
-       uintptr_t pc;
-       switch((int)(c->qid.path)){
+       if (waserror()) {
+               kfree(cb);
+               nexterror();
+       }
+       switch ((int) c->qid.path) {
        case Kprofctlqid:
-               if(strncmp(a, "startclr", 8) == 0){
-                       kprof_clear(&kprof);
-                       kprof.time = 1;
-               }else if(strncmp(a, "start", 5) == 0) {
-                       kprof.time = 1;
-                       setup_timers();
-               } else if(strncmp(a, "stop", 4) == 0) {
-                       kprof.time = 0;
-               } else if(strncmp(a, "clear", 5) == 0) {
-                       kprof_clear(&kprof);
-               }else if(strncmp(a, "opstart", 7) == 0) {
-                       oprofile_control_trace(1);
-               }else if(strncmp(a, "opstop", 6) == 0) {
-                       oprofile_control_trace(0);
-               } else  {
-                       printk("startclr|start|stop|clear|opstart|opstop");
-                       error("startclr|start|stop|clear|opstart|opstop");
+               if (cb->nf < 1)
+                       error(EFAIL, kprof_control_usage);
+               if (profiler_configure(cb))
+                       break;
+               if (!strcmp(cb->f[0], "start")) {
+                       kprof_start_profiler();
+               } else if (!strcmp(cb->f[0], "flush")) {
+                       kprof_flush_profiler();
+               } else if (!strcmp(cb->f[0], "stop")) {
+                       kprof_stop_profiler();
+               } else {
+                       error(EFAIL, kprof_control_usage);
                }
                break;
-
-               /* The format is a long as text. We strtoul, and jam it into the
-                * trace buffer.
-                */
-       case Kprofoprofileqid:
-               pc = strtoul(a, 0, 0);
-               oprofile_add_trace(pc);
+       case Kptracectlqid:
+               if (cb->nf < 1)
+                       error(EFAIL, "Bad kptrace_ctl option (reset)");
+               if (!strcmp(cb->f[0], "clear")) {
+                       spin_lock_irqsave(&ktrace_lock);
+                       circular_buffer_clear(&ktrace_data);
+                       spin_unlock_irqsave(&ktrace_lock);
+               }
+               break;
+       case Kptraceqid:
+               if (a && (n > 0)) {
+                       char *uptr = user_strdup_errno(current, a, n);
+
+                       if (uptr) {
+                               trace_printk("%s", uptr);
+                               user_memdup_free(current, uptr);
+                       } else {
+                               n = -1;
+                       }
+               }
+               break;
+       case Kprintxqid:
+               if (!strncmp(a, "on", 2))
+                       set_printx(1);
+               else if (!strncmp(a, "off", 3))
+                       set_printx(0);
+               else if (!strncmp(a, "toggle", 6))
+                       set_printx(2);
+               else
+                       error(EFAIL, "Invalid option to Kprintx %s\n", a);
+               break;
+       case Kmpstatqid:
+       case Kmpstatrawqid:
+               if (cb->nf < 1)
+                       error(EFAIL, "Bad mpstat option (reset|ipi|on|off)");
+               if (!strcmp(cb->f[0], "reset")) {
+                       for (int i = 0; i < num_cores; i++)
+                               reset_cpu_state_ticks(i);
+               } else if (!strcmp(cb->f[0], "on")) {
+                       /* TODO: enable the ticks */ ;
+               } else if (!strcmp(cb->f[0], "off")) {
+                       /* TODO: disable the ticks */ ;
+               } else if (!strcmp(cb->f[0], "ipi")) {
+                       if (cb->nf < 2)
+                               error(EFAIL, "Need another arg: ipi [on|off]");
+                       if (!strcmp(cb->f[1], "on"))
+                               kprof.mpstat_ipi = TRUE;
+                       else if (!strcmp(cb->f[1], "off"))
+                               kprof.mpstat_ipi = FALSE;
+                       else
+                               error(EFAIL, "ipi [on|off]");
+               } else {
+                       error(EFAIL, "Bad mpstat option (reset|ipi|on|off)");
+               }
                break;
        default:
-               error(Ebadusefd);
+               error(EBADFD, ERROR_FIXME);
        }
+       kfree(cb);
+       poperror();
        return n;
 }
 
+size_t kprof_tracedata_size(void)
+{
+       return circular_buffer_size(&ktrace_data);
+}
+
+size_t kprof_tracedata_read(void *data, size_t size, size_t offset)
+{
+       spin_lock_irqsave(&ktrace_lock);
+       if (likely(ktrace_init_done))
+               size = circular_buffer_read(&ktrace_data, data, size, offset);
+       else
+               size = 0;
+       spin_unlock_irqsave(&ktrace_lock);
+
+       return size;
+}
+
+void kprof_dump_data(void)
+{
+       void *buf;
+       size_t len = kprof_tracedata_size();
+
+       buf = kmalloc(len, MEM_WAIT);
+       kprof_tracedata_read(buf, len, 0);
+       printk("%s", buf);
+       kfree(buf);
+}
+
+void kprof_tracedata_write(const char *pretty_buf, size_t len)
+{
+       spin_lock_irqsave(&ktrace_lock);
+       if (unlikely(!ktrace_init_done)) {
+               circular_buffer_init(&ktrace_data, sizeof(ktrace_buffer),
+                                    ktrace_buffer);
+               ktrace_init_done = TRUE;
+       }
+       circular_buffer_write(&ktrace_data, pretty_buf, len);
+       spin_unlock_irqsave(&ktrace_lock);
+}
+
+static struct trace_printk_buffer *kprof_get_printk_buffer(void)
+{
+       static struct trace_printk_buffer boot_tpb;
+       static struct trace_printk_buffer *cpu_tpbs;
+       static atomic_t alloc_done;
+
+       if (unlikely(booting))
+               return &boot_tpb;
+       if (unlikely(!cpu_tpbs)) {
+               /* Poor man per-CPU data structure. I really do no like littering global
+                * data structures with module specific data.
+                * We cannot take the ktrace_lock to protect the kzmalloc() call, as
+                * that might trigger printk()s, and we would reenter here.
+                * Let only one core into the kzmalloc() path, and let the others get
+                * the boot_tpb until finished.
+                */
+               if (!atomic_cas(&alloc_done, 0, 1))
+                       return &boot_tpb;
+               cpu_tpbs = kzmalloc(num_cores * sizeof(struct trace_printk_buffer), 0);
+       }
+
+       return cpu_tpbs + core_id_early();
+}
+
+void trace_vprintk(const char *fmt, va_list args)
+{
+       struct print_buf {
+               char *ptr;
+               char *top;
+       };
+
+       void emit_print_buf_str(struct print_buf *pb, const char *str, ssize_t size)
+       {
+               if (size < 0) {
+                       for (; *str && (pb->ptr < pb->top); str++)
+                               *(pb->ptr++) = *str;
+               } else {
+                       for (; (size > 0) && (pb->ptr < pb->top); str++, size--)
+                               *(pb->ptr++) = *str;
+               }
+       }
+
+       static const size_t bufsz = TRACE_PRINTK_BUFFER_SIZE;
+       static const size_t usr_bufsz = (3 * bufsz) / 8;
+       static const size_t kp_bufsz = bufsz - usr_bufsz;
+       struct trace_printk_buffer *tpb = kprof_get_printk_buffer();
+       struct timespec ts_now = { 0, 0 };
+       struct print_buf pb;
+       char *usrbuf = tpb->buffer, *kpbuf = tpb->buffer + usr_bufsz;
+       const char *utop, *uptr;
+       char hdr[64];
+
+       if (!atomic_cas(&tpb->in_use, 0, 1))
+               return;
+       if (likely(__proc_global_info.tsc_freq))
+               ts_now = tsc2timespec(read_tsc());
+       snprintf(hdr, sizeof(hdr), "[%lu.%09lu]:cpu%d: ", ts_now.tv_sec,
+                ts_now.tv_nsec, core_id_early());
+
+       pb.ptr = usrbuf + vsnprintf(usrbuf, usr_bufsz, fmt, args);
+       pb.top = usrbuf + usr_bufsz;
+
+       if (pb.ptr[-1] != '\n')
+               emit_print_buf_str(&pb, "\n", 1);
+       /* snprintf null terminates the buffer, and does not count that as part of
+        * the len.  If we maxed out the buffer, let's make sure it has a \n.
+        */
+       if (pb.ptr == pb.top)
+               pb.ptr[-1] = '\n';
+       utop = pb.ptr;
+
+       pb.ptr = kpbuf;
+       pb.top = kpbuf + kp_bufsz;
+       for (uptr = usrbuf; uptr < utop;) {
+               const char *nlptr = memchr(uptr, '\n', utop - uptr);
+
+               if (nlptr == NULL)
+                       nlptr = utop;
+               emit_print_buf_str(&pb, hdr, -1);
+               emit_print_buf_str(&pb, uptr, (nlptr - uptr) + 1);
+               uptr = nlptr + 1;
+       }
+       kprof_tracedata_write(kpbuf, pb.ptr - kpbuf);
+       atomic_set(&tpb->in_use, 0);
+}
+
+void trace_printk(const char *fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+       trace_vprintk(fmt, args);
+       va_end(args);
+}
+
/* #K: kernel profiler, ktrace, printx control, and mpstat.  Unimplemented
 * operations fall through to the generic dev* stubs. */
struct dev kprofdevtab __devtab = {
	.name = "kprof",

	.reset = devreset,
	.init = kprof_init,
	.shutdown = kprof_shutdown,
	.attach = kprof_attach,
	.walk = kprof_walk,
	.stat = kprof_stat,
	.open = kprof_open,
	.create = devcreate,
	.close = kprof_close,
	.read = kprof_read,
	.bread = devbread,
	.write = kprof_write,
	.bwrite = devbwrite,
	.remove = devremove,
	.wstat = devwstat,
};