diff --git a/kern/src/profiler.c b/kern/src/profiler.c
index 6e68b53..c94caed 100644
--- a/kern/src/profiler.c
+++ b/kern/src/profiler.c
@@ -7,10 +7,9 @@
  * creation.
  *
  * Events are collected in a central qio queue.  High-frequency events (e.g.
- * profiler_add_hw_sample()) are collected in per-core buffers, which are
- * flushed to the central queue when they fill up or on command.
- * Lower-frequency events (e.g. profiler_notify_mmap()) just go straight to the
- * central queue.
+ * IRQ backtraces) are collected in per-core buffers, which are flushed to the
+ * central queue when they fill up or on command.  Lower-frequency events (e.g.
+ * profiler_notify_mmap()) just go straight to the central queue.
  *
  * Currently there is one global profiler.  Kprof is careful to only have one
  * open profiler at a time.  We assert that this is true.  TODO: stop using the
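The comment above describes a two-tier collection scheme: hot paths append records to a private per-core buffer and only touch the shared central queue when that buffer fills or a flush is requested. Here is a minimal sketch of the pattern, with hypothetical names (the real code builds struct blocks and hands them to the qio queue rather than using a flat byte buffer):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define PCPU_EVT_BUF_SZ 4096

	struct pcpu_evt_buf {
		size_t used;
		uint8_t data[PCPU_EVT_BUF_SZ];
	};

	/* Stand-in for handing bytes to the central qio queue. */
	void central_queue_write(const uint8_t *data, size_t len);

	static void pcpu_evt_flush(struct pcpu_evt_buf *buf)
	{
		if (buf->used) {
			central_queue_write(buf->data, buf->used);
			buf->used = 0;
		}
	}

	static void pcpu_evt_append(struct pcpu_evt_buf *buf, const void *rec,
				    size_t len)
	{
		/* Hot path: no shared state is touched unless we must flush. */
		if (buf->used + len > PCPU_EVT_BUF_SZ)
			pcpu_evt_flush(buf);
		memcpy(buf->data + buf->used, rec, len);
		buf->used += len;
	}
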
@@ -45,7 +44,6 @@
 #include "profiler.h"
 
 #define PROFILER_MAX_PRG_PATH  256
-#define PROFILER_BT_DEPTH 16
 
 #define VBE_MAX_SIZE(t) ((8 * sizeof(t) + 6) / 7)
 
@@ -130,6 +128,7 @@ static void profiler_push_kernel_trace64(struct profiler_cpu_context *cpu_buf,
                                          const uintptr_t *trace, size_t count,
                                          uint64_t info)
 {
+       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        size_t size = sizeof(struct proftype_kern_trace64) +
                count * sizeof(uint64_t);
        struct block *b;
@@ -151,6 +150,10 @@ static void profiler_push_kernel_trace64(struct profiler_cpu_context *cpu_buf,
 
                record->info = info;
                record->tstamp = nsec();
+               if (is_ktask(pcpui->cur_kthread) || !pcpui->cur_proc)
+                       record->pid = -1;
+               else
+                       record->pid = pcpui->cur_proc->pid;
                record->cpu = cpu_buf->cpu;
                record->num_traces = count;
                for (size_t i = 0; i < count; i++)
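The new pid field lets a trace consumer attribute kernel samples to a process. The rule added above, restated as a standalone helper (hypothetical name; the struct accesses mirror the diff): samples taken while running a ktask, or on a core with no current process, are tagged with pid -1.

	static int profiler_sample_pid(void)
	{
		struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

		/* ktasks run kernel work that belongs to no process, and a core
		 * may have no current process at all; tag those samples -1. */
		if (is_ktask(pcpui->cur_kthread) || !pcpui->cur_proc)
			return -1;
		return pcpui->cur_proc->pid;
	}
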
@@ -493,65 +496,31 @@ void profiler_trace_data_flush(void)
        smp_do_in_cores(&cset, profiler_core_flush, NULL);
 }
 
-void profiler_add_trace(uintptr_t pc, uint64_t info)
-{
-       if (is_user_raddr((void *) pc, 1))
-               profiler_add_user_backtrace(pc, 0, info);
-       else
-               profiler_add_kernel_backtrace(pc, 0, info);
-}
-
-void profiler_add_kernel_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
+void profiler_push_kernel_backtrace(uintptr_t *pc_list, size_t nr_pcs,
+                                    uint64_t info)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
                struct profiler_cpu_context *cpu_buf = profiler_get_cpu_ctx(core_id());
 
-               if (profiler_percpu_ctx && cpu_buf->tracing) {
-                       uintptr_t trace[PROFILER_BT_DEPTH];
-                       size_t n = 1;
-
-                       trace[0] = pc;
-                       if (likely(fp))
-                               n = backtrace_list(pc, fp, trace + 1,
-                                                  PROFILER_BT_DEPTH - 1) + 1;
-
-                       profiler_push_kernel_trace64(cpu_buf, trace, n, info);
-               }
+               if (profiler_percpu_ctx && cpu_buf->tracing)
+                       profiler_push_kernel_trace64(cpu_buf, pc_list, nr_pcs, info);
                kref_put(&profiler_kref);
        }
 }
 
-void profiler_add_user_backtrace(uintptr_t pc, uintptr_t fp, uint64_t info)
+void profiler_push_user_backtrace(uintptr_t *pc_list, size_t nr_pcs,
+                                  uint64_t info)
 {
        if (kref_get_not_zero(&profiler_kref, 1)) {
                struct proc *p = current;
                struct profiler_cpu_context *cpu_buf = profiler_get_cpu_ctx(core_id());
 
-               if (p && profiler_percpu_ctx && cpu_buf->tracing) {
-                       uintptr_t trace[PROFILER_BT_DEPTH];
-                       size_t n = 1;
-
-                       trace[0] = pc;
-                       if (likely(fp))
-                               n = backtrace_user_list(pc, fp, trace + 1,
-                                                       PROFILER_BT_DEPTH - 1) + 1;
-
-                       profiler_push_user_trace64(cpu_buf, p, trace, n, info);
-               }
+               if (profiler_percpu_ctx && cpu_buf->tracing)
+                       profiler_push_user_trace64(cpu_buf, p, pc_list, nr_pcs, info);
                kref_put(&profiler_kref);
        }
 }
 
-void profiler_add_hw_sample(struct hw_trapframe *hw_tf, uint64_t info)
-{
-       if (in_kernel(hw_tf))
-               profiler_add_kernel_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                             info);
-       else
-               profiler_add_user_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf),
-                                           info);
-}
-
 int profiler_size(void)
 {
        return profiler_queue ? qlen(profiler_queue) : 0;
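With this change the profiler no longer walks the stack itself: each caller captures the backtrace, at a depth of its own choosing now that PROFILER_BT_DEPTH is gone from this file, and pushes the whole PC list. A hypothetical sketch of what a sampling path might now look like, built from the helpers the removed code used and mirroring its old convention of storing the trapframe's pc at slot 0:

	#define SAMPLE_BT_DEPTH 16	/* caller's choice; the removed
					 * PROFILER_BT_DEPTH was 16 as well */

	static void sample_hw_tf(struct hw_trapframe *hw_tf, uint64_t info)
	{
		uintptr_t pc_list[SAMPLE_BT_DEPTH];
		size_t n;

		pc_list[0] = get_hwtf_pc(hw_tf);
		if (in_kernel(hw_tf)) {
			n = 1 + backtrace_list(pc_list[0], get_hwtf_fp(hw_tf),
					       pc_list + 1, SAMPLE_BT_DEPTH - 1);
			profiler_push_kernel_backtrace(pc_list, n, info);
		} else {
			n = 1 + backtrace_user_list(pc_list[0], get_hwtf_fp(hw_tf),
						    pc_list + 1, SAMPLE_BT_DEPTH - 1);
			profiler_push_user_backtrace(pc_list, n, info);
		}
	}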