printk: check for user pointers in format string parameters
[akaros.git] / kern / src / profiler.c
index c85f45b..ede0abc 100644 (file)
@@ -7,10 +7,9 @@
  * creation.
  *
  * Events are collected in a central qio queue.  High-frequency events (e.g.
- * profiler_add_hw_sample()) are collected in per-core buffers, which are
- * flushed to the central queue when they fill up or on command.
- * Lower-frequency events (e.g. profiler_notify_mmap()) just go straight to the
- * central queue.
+ * IRQ backtraces) are collected in per-core buffers, which are flushed to the
+ * central queue when they fill up or on command.  Lower-frequency events (e.g.
+ * profiler_notify_mmap()) just go straight to the central queue.
  *
  * Currently there is one global profiler.  Kprof is careful to only have one
  * open profiler at a time.  We assert that this is true.  TODO: stop using the
@@ -31,7 +30,6 @@
 #include <env.h>
 #include <process.h>
 #include <mm.h>
-#include <vfs.h>
 #include <kmalloc.h>
 #include <pmap.h>
 #include <kref.h>
@@ -45,7 +43,6 @@
 #include "profiler.h"
 
 #define PROFILER_MAX_PRG_PATH  256
-#define PROFILER_BT_DEPTH 16
 
 #define VBE_MAX_SIZE(t) ((8 * sizeof(t) + 6) / 7)
 
@@ -71,9 +68,9 @@ static inline struct profiler_cpu_context *profiler_get_cpu_ctx(int cpu)
 
 static inline char *vb_encode_uint64(char *data, uint64_t n)
 {
-       /* Classical variable bytes encoding. Encodes 7 bits at a time, using bit
-        * number 7 in the byte, as indicator of end of sequence (when zero).
-        */
+       /* Classical variable bytes encoding. Encodes 7 bits at a time, using
+        * bit number 7 in the byte, as indicator of end of sequence (when
+        * zero). */
        for (; n >= 0x80; n >>= 7)
                *data++ = (char) (n | 0x80);
        *data++ = (char) n;
@@ -84,8 +81,9 @@ static inline char *vb_encode_uint64(char *data, uint64_t n)
 static struct block *profiler_buffer_write(struct profiler_cpu_context *cpu_buf,
                                            struct block *b)
 {
-       /* qpass will drop b if the queue is over its limit.  we're willing to lose
-        * traces, but we won't lose 'control' events, such as MMAP and PID. */
+       /* qpass will drop b if the queue is over its limit.  we're willing to
+        * lose traces, but we won't lose 'control' events, such as MMAP and
+        * PID. */
        if (b) {
                if (qpass(profiler_queue, b) < 0)
                        cpu_buf->dropped_data_cnt++;
@@ -262,8 +260,9 @@ static void profiler_emit_current_system_status(void)
        {
                struct proc *p = (struct proc *) opaque;
 
-               profiler_notify_mmap(p, vmr->vm_base, vmr->vm_end - vmr->vm_base,
-                                    vmr->vm_prot, vmr->vm_flags, vmr->vm_file,
+               profiler_notify_mmap(p, vmr->vm_base,
+                                    vmr->vm_end - vmr->vm_base,
+                                    vmr->vm_prot, vmr->vm_flags, vmr->__vm_foc,
                                     vmr->vm_foff);
        }
 
@@ -300,14 +299,14 @@ static void alloc_cpu_buffers(void)
 {
        ERRSTACK(1);
 
-       /* It is very important that we enqueue and dequeue entire records at once.
-        * If we leave partial records, the entire stream will be corrupt.  Our
-        * reader does its best to make sure it has room for complete records
-        * (checks qlen()).
+       /* It is very important that we enqueue and dequeue entire records at
+        * once.  If we leave partial records, the entire stream will be
+        * corrupt.  Our reader does its best to make sure it has room for
+        * complete records (checks qlen()).
         *
         * If we ever get corrupt streams, try making this a Qmsg.  Though it
-        * doesn't help every situation - we have issues with writes greater than
-        * Maxatomic regardless. */
+        * doesn't help every situation - we have issues with writes greater
+        * than Maxatomic regardless. */
        profiler_queue = qopen(profiler_queue_limit, 0, NULL, NULL);
        if (!profiler_queue)
                error(ENOMEM, ERROR_FIXME);
@@ -498,57 +497,35 @@ void profiler_trace_data_flush(void)
        smp_do_in_cores(&cset, profiler_core_flush, NULL);
 }
 
-void profiler_add_trace(uintptr_t pc, uint64_t info)
-{
-       if (is_user_raddr((void *) pc, 1))
-               profiler_add_user_backtrace(pc, 0, info);
-       else
-               profiler_add_kernel_backtrace(pc, 0, info);
-}
-
-void profiler_add_kernel_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
+void profiler_push_kernel_backtrace(uintptr_t *pc_list, size_t nr_pcs,
+                                    uint64_t info)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
-               struct profiler_cpu_context *cpu_buf = profiler_get_cpu_ctx(core_id());
-
-               if (profiler_percpu_ctx && cpu_buf->tracing) {
-                       uintptr_t trace[PROFILER_BT_DEPTH];
-                       size_t n;
+               struct profiler_cpu_context *cpu_buf =
+                       profiler_get_cpu_ctx(core_id());
 
-                       n = backtrace_list(pc, fp, trace, PROFILER_BT_DEPTH);
-                       profiler_push_kernel_trace64(cpu_buf, trace, n, info);
-               }
+               if (profiler_percpu_ctx && cpu_buf->tracing)
+                       profiler_push_kernel_trace64(cpu_buf, pc_list, nr_pcs,
+                                                    info);
                kref_put(&profiler_kref);
        }
 }
 
-void profiler_add_user_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
+void profiler_push_user_backtrace(uintptr_t *pc_list, size_t nr_pcs,
+                                  uint64_t info)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
                struct proc *p = current;
-               struct profiler_cpu_context *cpu_buf = profiler_get_cpu_ctx(core_id());
+               struct profiler_cpu_context *cpu_buf =
+                       profiler_get_cpu_ctx(core_id());
 
-               if (p && profiler_percpu_ctx && cpu_buf->tracing) {
-                       uintptr_t trace[PROFILER_BT_DEPTH];
-                       size_t n;
-
-                       n = backtrace_user_list(pc, fp, trace, PROFILER_BT_DEPTH);
-                       profiler_push_user_trace64(cpu_buf, p, trace, n, info);
-               }
+               if (profiler_percpu_ctx && cpu_buf->tracing)
+                       profiler_push_user_trace64(cpu_buf, p, pc_list, nr_pcs,
+                                                  info);
                kref_put(&profiler_kref);
        }
 }
 
-void profiler_add_hw_sample(struct hw_trapframe *hw_tf, uint64_t info)
-{
-       if (in_kernel(hw_tf))
-               profiler_add_kernel_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                             info);
-       else
-               profiler_add_user_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                           info);
-}
-
 int profiler_size(void)
 {
        return profiler_queue ? qlen(profiler_queue) : 0;
@@ -560,15 +537,17 @@ int profiler_read(void *va, int n)
 }
 
 void profiler_notify_mmap(struct proc *p, uintptr_t addr, size_t size, int prot,
-                          int flags, struct file *f, size_t offset)
+                          int flags, struct file_or_chan *foc, size_t offset)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
-               if (f && (prot & PROT_EXEC) && profiler_percpu_ctx) {
+               if (foc && (prot & PROT_EXEC) && profiler_percpu_ctx) {
                        char path_buf[PROFILER_MAX_PRG_PATH];
-                       char *path = file_abs_path(f, path_buf, sizeof(path_buf));
+                       char *path = foc_abs_path(foc, path_buf,
+                                                 sizeof(path_buf));
 
                        if (likely(path))
-                               profiler_push_pid_mmap(p, addr, size, offset, path);
+                               profiler_push_pid_mmap(p, addr, size, offset,
+                                                      path);
                }
                kref_put(&profiler_kref);
        }