Initial commit of some oprofile support
author Ronald G. Minnich <rminnich@google.com>
Tue, 29 Apr 2014 14:59:24 +0000 (07:59 -0700)
committer Ronald G. Minnich <rminnich@google.com>
Tue, 29 Apr 2014 15:26:01 +0000 (08:26 -0700)
This is going to need some work, but we need it.
It's weird how complicated it is to work with synthetics in Linux.
I expect this code to shrink a lot.

Signed-off-by: Ronald G. Minnich <rminnich@google.com>
17 files changed:
.gitignore
kern/src/oprofile/COPYING [new file with mode: 0644]
kern/src/oprofile/buffer_sync.c [new file with mode: 0644]
kern/src/oprofile/buffer_sync.h [new file with mode: 0644]
kern/src/oprofile/cpu_buffer.c [new file with mode: 0644]
kern/src/oprofile/cpu_buffer.h [new file with mode: 0644]
kern/src/oprofile/event_buffer.c [new file with mode: 0644]
kern/src/oprofile/event_buffer.h [new file with mode: 0644]
kern/src/oprofile/nmi_timer_int.c [new file with mode: 0644]
kern/src/oprofile/oprof.c [new file with mode: 0644]
kern/src/oprofile/oprof.h [new file with mode: 0644]
kern/src/oprofile/oprofile_files.c [new file with mode: 0644]
kern/src/oprofile/oprofile_perf.c [new file with mode: 0644]
kern/src/oprofile/oprofile_stats.c [new file with mode: 0644]
kern/src/oprofile/oprofile_stats.h [new file with mode: 0644]
kern/src/oprofile/oprofilefs.c [new file with mode: 0644]
kern/src/oprofile/timer_int.c [new file with mode: 0644]

index dbfb6fc..5d662a1 100644
@@ -40,3 +40,4 @@ kern/kfs/*
 scripts/kconfig/zconf.lex.c
 kern/include/config/*
 kern/include/generated/*
+tags
diff --git a/kern/src/oprofile/COPYING b/kern/src/oprofile/COPYING
new file mode 100644
index 0000000..ca442d3
--- /dev/null
@@ -0,0 +1,356 @@
+
+   NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Also note that the only valid version of the GPL as far as the kernel
+ is concerned is _this_ particular version of the license (ie v2, not
+ v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+                       Linus Torvalds
+
+----------------------------------------
+
+                   GNU GENERAL PUBLIC LICENSE
+                      Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                           Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+\f
+                   GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+\f
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+\f
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+\f
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                           NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                    END OF TERMS AND CONDITIONS
+\f
+           How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/kern/src/oprofile/buffer_sync.c b/kern/src/oprofile/buffer_sync.c
new file mode 100644
index 0000000..ade8b58
--- /dev/null
@@ -0,0 +1,566 @@
+/**
+ * @file buffer_sync.c
+ *
+ * @remark Copyright 2002-2009 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
+ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+ * in several circumstances, mentioned below.
+ *
+ * The processing does the job of converting the
+ * transitory EIP value into a persistent dentry/offset
+ * value that the profiler can record at its leisure.
+ *
+ * See fs/dcookies.c for a description of the dentry/offset
+ * objects.
+ */
+
+#include "oprofile_stats.h"
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+
+static LIST_HEAD(dying_tasks);
+static LIST_HEAD(dead_tasks);
+static cpumask_var_t marked_cpus;
+static DEFINE_SPINLOCK(task_mortuary);
+static void process_task_mortuary(void);
+
+/* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+ * does the task eventually get freed, because by then
+ * we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
+ */
+static int
+task_free_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       unsigned long flags;
+       struct task_struct *task = data;
+       spin_lock_irqsave(&task_mortuary, flags);
+       list_add(&task->tasks, &dying_tasks);
+       spin_unlock_irqrestore(&task_mortuary, flags);
+       return NOTIFY_OK;
+}
+
+
+/* The task is on its way out. A sync of the buffer means we can catch
+ * any remaining samples for this task.
+ */
+static int
+task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       /* To avoid latency problems, we only process the current CPU,
+        * hoping that most samples for the task are on this CPU
+        */
+       sync_buffer(raw_smp_processor_id());
+       return 0;
+}
+
+
+/* The task is about to try a do_munmap(). We peek at what it's going to
+ * do, and if it's an executable region, process the samples first, so
+ * we don't lose any. This does not have to be exact, it's a QoI issue
+ * only.
+ */
+static int
+munmap_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       unsigned long addr = (unsigned long)data;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *mpnt;
+
+       down_read(&mm->mmap_sem);
+
+       mpnt = find_vma(mm, addr);
+       if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
+               up_read(&mm->mmap_sem);
+               /* To avoid latency problems, we only process the current CPU,
+                * hoping that most samples for the task are on this CPU
+                */
+               sync_buffer(raw_smp_processor_id());
+               return 0;
+       }
+
+       up_read(&mm->mmap_sem);
+       return 0;
+}
+
+
+/* We need to be told about new modules so we don't attribute to a previously
+ * loaded module, or drop the samples on the floor.
+ */
+static int
+module_load_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+       return 0;
+}
+
+
+static struct notifier_block task_free_nb = {
+       .notifier_call  = task_free_notify,
+};
+
+static struct notifier_block task_exit_nb = {
+       .notifier_call  = task_exit_notify,
+};
+
+static struct notifier_block munmap_nb = {
+       .notifier_call  = munmap_notify,
+};
+
+static struct notifier_block module_load_nb = {
+       .notifier_call = module_load_notify,
+};
+
+static void free_all_tasks(void)
+{
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+}
+
+int sync_start(void)
+{
+       int err;
+
+       if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+               return -ENOMEM;
+
+       err = task_handoff_register(&task_free_nb);
+       if (err)
+               goto out1;
+       err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
+       if (err)
+               goto out2;
+       err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
+       if (err)
+               goto out3;
+       err = register_module_notifier(&module_load_nb);
+       if (err)
+               goto out4;
+
+       start_cpu_work();
+
+out:
+       return err;
+out4:
+       profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
+out3:
+       profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
+out2:
+       task_handoff_unregister(&task_free_nb);
+       free_all_tasks();
+out1:
+       free_cpumask_var(marked_cpus);
+       goto out;
+}
+
+
+void sync_stop(void)
+{
+       end_cpu_work();
+       unregister_module_notifier(&module_load_nb);
+       profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
+       profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
+       task_handoff_unregister(&task_free_nb);
+       barrier();                      /* do all of the above first */
+
+       flush_cpu_work();
+
+       free_all_tasks();
+       free_cpumask_var(marked_cpus);
+}
+
+
+/* Optimisation. We can manage without taking the dcookie sem
+ * because we cannot reach this code without at least one
+ * dcookie user still being registered (namely, the reader
+ * of the event buffer). */
+static inline unsigned long fast_get_dcookie(struct path *path)
+{
+       unsigned long cookie;
+
+       if (path->dentry->d_flags & DCACHE_COOKIE)
+               return (unsigned long)path->dentry;
+       get_dcookie(path, &cookie);
+       return cookie;
+}
+
+
+/* Look up the dcookie for the task's mm->exe_file,
+ * which corresponds loosely to "application name". This is
+ * not strictly necessary but allows oprofile to associate
+ * shared-library samples with particular applications
+ */
+static unsigned long get_exec_dcookie(struct mm_struct *mm)
+{
+       unsigned long cookie = NO_COOKIE;
+
+       if (mm && mm->exe_file)
+               cookie = fast_get_dcookie(&mm->exe_file->f_path);
+
+       return cookie;
+}
+
+
+/* Convert the EIP value of a sample into a persistent dentry/offset
+ * pair that can then be added to the global event buffer. We make
+ * sure to do this lookup before a mm->mmap modification happens so
+ * we don't lose track.
+ */
+static unsigned long
+lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
+{
+       unsigned long cookie = NO_COOKIE;
+       struct vm_area_struct *vma;
+
+       for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+
+               if (addr < vma->vm_start || addr >= vma->vm_end)
+                       continue;
+
+               if (vma->vm_file) {
+                       cookie = fast_get_dcookie(&vma->vm_file->f_path);
+                       *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
+                               vma->vm_start;
+               } else {
+                       /* must be an anonymous map */
+                       *offset = addr;
+               }
+
+               break;
+       }
+
+       if (!vma)
+               cookie = INVALID_COOKIE;
+
+       return cookie;
+}
+
+static unsigned long last_cookie = INVALID_COOKIE;
+
+static void add_cpu_switch(int i)
+{
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(CPU_SWITCH_CODE);
+       add_event_entry(i);
+       last_cookie = INVALID_COOKIE;
+}
+
+static void add_kernel_ctx_switch(unsigned int in_kernel)
+{
+       add_event_entry(ESCAPE_CODE);
+       if (in_kernel)
+               add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+       else
+               add_event_entry(KERNEL_EXIT_SWITCH_CODE);
+}
+
+static void
+add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
+{
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(CTX_SWITCH_CODE);
+       add_event_entry(task->pid);
+       add_event_entry(cookie);
+       /* Another code for daemon back-compat */
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(CTX_TGID_CODE);
+       add_event_entry(task->tgid);
+}
+
+
+static void add_cookie_switch(unsigned long cookie)
+{
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(COOKIE_SWITCH_CODE);
+       add_event_entry(cookie);
+}
+
+
+static void add_trace_begin(void)
+{
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(TRACE_BEGIN_CODE);
+}
+
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
+{
+       unsigned long code, pc, val;
+       unsigned long cookie;
+       off_t offset;
+
+       if (!op_cpu_buffer_get_data(entry, &code))
+               return;
+       if (!op_cpu_buffer_get_data(entry, &pc))
+               return;
+       if (!op_cpu_buffer_get_size(entry))
+               return;
+
+       if (mm) {
+               cookie = lookup_dcookie(mm, pc, &offset);
+
+               if (cookie == NO_COOKIE)
+                       offset = pc;
+               if (cookie == INVALID_COOKIE) {
+                       atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+                       offset = pc;
+               }
+               if (cookie != last_cookie) {
+                       add_cookie_switch(cookie);
+                       last_cookie = cookie;
+               }
+       } else
+               offset = pc;
+
+       add_event_entry(ESCAPE_CODE);
+       add_event_entry(code);
+       add_event_entry(offset);        /* Offset from Dcookie */
+
+       while (op_cpu_buffer_get_data(entry, &val))
+               add_event_entry(val);
+}
+
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
+{
+       add_event_entry(offset);
+       add_event_entry(event);
+}
+
+
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
+{
+       unsigned long cookie;
+       off_t offset;
+
+       if (in_kernel) {
+               add_sample_entry(s->eip, s->event);
+               return 1;
+       }
+
+       /* add userspace sample */
+
+       if (!mm) {
+               atomic_inc(&oprofile_stats.sample_lost_no_mm);
+               return 0;
+       }
+
+       cookie = lookup_dcookie(mm, s->eip, &offset);
+
+       if (cookie == INVALID_COOKIE) {
+               atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+               return 0;
+       }
+
+       if (cookie != last_cookie) {
+               add_cookie_switch(cookie);
+               last_cookie = cookie;
+       }
+
+       add_sample_entry(offset, s->event);
+
+       return 1;
+}
+
+
+static void release_mm(struct mm_struct *mm)
+{
+       if (!mm)
+               return;
+       up_read(&mm->mmap_sem);
+       mmput(mm);
+}
+
+
+static struct mm_struct *take_tasks_mm(struct task_struct *task)
+{
+       struct mm_struct *mm = get_task_mm(task);
+       if (mm)
+               down_read(&mm->mmap_sem);
+       return mm;
+}
+
+
+static inline int is_code(unsigned long val)
+{
+       return val == ESCAPE_CODE;
+}
+
+
+/* Move tasks along towards death. Any tasks on dead_tasks
+ * will definitely have no remaining references in any
+ * CPU buffers at this point, because we use two lists,
+ * and to have reached the list, it must have gone through
+ * one full sync already.
+ */
+static void process_task_mortuary(void)
+{
+       unsigned long flags;
+       LIST_HEAD(local_dead_tasks);
+       struct task_struct *task;
+       struct task_struct *ttask;
+
+       spin_lock_irqsave(&task_mortuary, flags);
+
+       list_splice_init(&dead_tasks, &local_dead_tasks);
+       list_splice_init(&dying_tasks, &dead_tasks);
+
+       spin_unlock_irqrestore(&task_mortuary, flags);
+
+       list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
+               list_del(&task->tasks);
+               free_task(task);
+       }
+}
+
+
+static void mark_done(int cpu)
+{
+       int i;
+
+       cpumask_set_cpu(cpu, marked_cpus);
+
+       for_each_online_cpu(i) {
+               if (!cpumask_test_cpu(i, marked_cpus))
+                       return;
+       }
+
+       /* All CPUs have been processed at least once,
+        * we can process the mortuary once
+        */
+       process_task_mortuary();
+
+       cpumask_clear(marked_cpus);
+}
+
+
+/* FIXME: this is not sufficient if we implement syscall barrier backtrace
+ * traversal, the code switch to sb_sample_start at first kernel enter/exit
+ * switch so we need a fifth state and some special handling in sync_buffer()
+ */
+typedef enum {
+       sb_bt_ignore = -2,
+       sb_buffer_start,
+       sb_bt_start,
+       sb_sample_start,
+} sync_buffer_state;
+
+/* Sync one of the CPU's buffers into the global event buffer.
+ * Here we need to go through each batch of samples punctuated
+ * by context switch notes, taking the task's mmap_sem and doing
+ * lookup in task->mm->mmap to convert EIP into dcookie/offset
+ * value.
+ */
+void sync_buffer(int cpu)
+{
+       struct mm_struct *mm = NULL;
+       struct mm_struct *oldmm;
+       unsigned long val;
+       struct task_struct *new;
+       unsigned long cookie = 0;
+       int in_kernel = 1;
+       sync_buffer_state state = sb_buffer_start;
+       unsigned int i;
+       unsigned long available;
+       unsigned long flags;
+       struct op_entry entry;
+       struct op_sample *sample;
+
+       mutex_lock(&buffer_mutex);
+
+       add_cpu_switch(cpu);
+
+       op_cpu_buffer_reset(cpu);
+       available = op_cpu_buffer_entries(cpu);
+
+       for (i = 0; i < available; ++i) {
+               sample = op_cpu_buffer_read_entry(&entry, cpu);
+               if (!sample)
+                       break;
+
+               if (is_code(sample->eip)) {
+                       flags = sample->event;
+                       if (flags & TRACE_BEGIN) {
+                               state = sb_bt_start;
+                               add_trace_begin();
+                       }
+                       if (flags & KERNEL_CTX_SWITCH) {
+                               /* kernel/userspace switch */
+                               in_kernel = flags & IS_KERNEL;
+                               if (state == sb_buffer_start)
+                                       state = sb_sample_start;
+                               add_kernel_ctx_switch(flags & IS_KERNEL);
+                       }
+                       if (flags & USER_CTX_SWITCH
+                           && op_cpu_buffer_get_data(&entry, &val)) {
+                               /* userspace context switch */
+                               new = (struct task_struct *)val;
+                               oldmm = mm;
+                               release_mm(oldmm);
+                               mm = take_tasks_mm(new);
+                               if (mm != oldmm)
+                                       cookie = get_exec_dcookie(mm);
+                               add_user_ctx_switch(new, cookie);
+                       }
+                       if (op_cpu_buffer_get_size(&entry))
+                               add_data(&entry, mm);
+                       continue;
+               }
+
+               if (state < sb_bt_start)
+                       /* ignore sample */
+                       continue;
+
+               if (add_sample(mm, sample, in_kernel))
+                       continue;
+
+               /* ignore backtraces if failed to add a sample */
+               if (state == sb_bt_start) {
+                       state = sb_bt_ignore;
+                       atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+               }
+       }
+       release_mm(mm);
+
+       mark_done(cpu);
+
+       mutex_unlock(&buffer_mutex);
+}
+
+/* The function can be used to add a buffer worth of data directly to
+ * the kernel buffer. The buffer is assumed to be a circular buffer.
+ * Take the entries from index start and end at index end, wrapping
+ * at max_entries.
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+                      unsigned int stop, unsigned int max)
+{
+       int i;
+
+       i = start;
+
+       mutex_lock(&buffer_mutex);
+       while (i != stop) {
+               add_event_entry(buf[i++]);
+
+               if (i >= max)
+                       i = 0;
+       }
+
+       mutex_unlock(&buffer_mutex);
+}
+
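For reference, here is a sketch of the record stream that sync_buffer() emits into the global event buffer, reconstructed from the add_*() helpers above; the ESCAPE_CODE/*_CODE markers come from the oprofile headers rather than this file:

    /* one CPU's worth of output, in order of emission */
    add_event_entry(ESCAPE_CODE);        /* add_cpu_switch() */
    add_event_entry(CPU_SWITCH_CODE);
    add_event_entry(cpu);

    add_event_entry(ESCAPE_CODE);        /* add_user_ctx_switch() */
    add_event_entry(CTX_SWITCH_CODE);
    add_event_entry(task->pid);
    add_event_entry(cookie);             /* dcookie of the task's executable */
    add_event_entry(ESCAPE_CODE);
    add_event_entry(CTX_TGID_CODE);
    add_event_entry(task->tgid);

    add_event_entry(offset);             /* add_sample_entry(): offset within */
    add_event_entry(event);              /* the mapped file, plus the event   */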
diff --git a/kern/src/oprofile/buffer_sync.h b/kern/src/oprofile/buffer_sync.h
new file mode 100644
index 0000000..3110732
--- /dev/null
@@ -0,0 +1,22 @@
+/**
+ * @file buffer_sync.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_BUFFER_SYNC_H
+#define OPROFILE_BUFFER_SYNC_H
+
+/* add the necessary profiling hooks */
+int sync_start(void);
+
+/* remove the hooks */
+void sync_stop(void);
+
+/* sync the given CPU's buffer */
+void sync_buffer(int cpu);
+
+#endif /* OPROFILE_BUFFER_SYNC_H */
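A minimal sketch of the intended calling order for these hooks, assuming oprof.c wires them up roughly the way the Linux driver does; profiling_begin/profiling_end are hypothetical names, not functions in this commit:

    /* illustrative only: roughly what oprofile_setup()/oprofile_shutdown() do */
    static int profiling_begin(void)
    {
            int err = alloc_event_buffer();      /* event_buffer.c, below */
            if (err)
                    return err;
            err = sync_start();     /* register notifiers, start per-cpu work */
            if (err)
                    free_event_buffer();
            return err;
    }

    static void profiling_end(void)
    {
            sync_stop();            /* flush remaining per-cpu samples */
            free_event_buffer();
    }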
diff --git a/kern/src/oprofile/cpu_buffer.c b/kern/src/oprofile/cpu_buffer.c
new file mode 100644
index 0000000..38bb9a5
--- /dev/null
@@ -0,0 +1,459 @@
+/**
+ * @file cpu_buffer.c
+ *
+ * @remark Copyright 2002-2009 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Robert Richter <robert.richter@amd.com>
+ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+ * event buffer by sync_buffer().
+ *
+ * We use a local buffer for two reasons: an NMI or similar
+ * interrupt cannot synchronise, and high sampling rates
+ * would lead to catastrophic global synchronisation if
+ * a global buffer was used.
+ */
+
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+#include "oprof.h"
+
+#define OP_BUFFER_FLAGS        0
+
+static struct ring_buffer *op_ring_buffer;
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
+
+static void wq_sync_buffer(struct work_struct *work);
+
+#define DEFAULT_TIMER_EXPIRE (HZ / 10)
+static int work_enabled;
+
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+       return oprofile_cpu_buffer_size;
+}
+
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+
+       cpu_buf->sample_lost_overflow++;
+}
+
+void free_cpu_buffers(void)
+{
+       if (op_ring_buffer)
+               ring_buffer_free(op_ring_buffer);
+       op_ring_buffer = NULL;
+}
+
+#define RB_EVENT_HDR_SIZE 4
+
+int alloc_cpu_buffers(void)
+{
+       int i;
+
+       unsigned long buffer_size = oprofile_cpu_buffer_size;
+       unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
+                                                RB_EVENT_HDR_SIZE);
+
+       op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+       if (!op_ring_buffer)
+               goto fail;
+
+       for_each_possible_cpu(i) {
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
+
+               b->last_task = NULL;
+               b->last_is_kernel = -1;
+               b->tracing = 0;
+               b->buffer_size = buffer_size;
+               b->sample_received = 0;
+               b->sample_lost_overflow = 0;
+               b->backtrace_aborted = 0;
+               b->sample_invalid_eip = 0;
+               b->cpu = i;
+               INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
+       }
+       return 0;
+
+fail:
+       free_cpu_buffers();
+       return -ENOMEM;
+}
+
+void start_cpu_work(void)
+{
+       int i;
+
+       work_enabled = 1;
+
+       for_each_online_cpu(i) {
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
+
+               /*
+                * Spread the work by 1 jiffy per cpu so they don't all
+                * fire at once.
+                */
+               schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
+       }
+}
+
+void end_cpu_work(void)
+{
+       work_enabled = 0;
+}
+
+void flush_cpu_work(void)
+{
+       int i;
+
+       for_each_online_cpu(i) {
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
+
+               /* these works are per-cpu, no need for flush_sync */
+               flush_delayed_work(&b->work);
+       }
+}
+
+/*
+ * This function prepares the cpu buffer to write a sample.
+ *
+ * Struct op_entry is used during operations on the ring buffer while
+ * struct op_sample contains the data that is stored in the ring
+ * buffer. Struct entry can be uninitialized. The function reserves a
+ * data array that is specified by size. Use
+ * op_cpu_buffer_write_commit() after preparing the sample. In case of
+ * errors a null pointer is returned, otherwise the pointer to the
+ * sample.
+ *
+ */
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
+{
+       entry->event = ring_buffer_lock_reserve
+               (op_ring_buffer, sizeof(struct op_sample) +
+                size * sizeof(entry->sample->data[0]));
+       if (!entry->event)
+               return NULL;
+       entry->sample = ring_buffer_event_data(entry->event);
+       entry->size = size;
+       entry->data = entry->sample->data;
+
+       return entry->sample;
+}
+
+int op_cpu_buffer_write_commit(struct op_entry *entry)
+{
+       return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
+}
+
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
+{
+       struct ring_buffer_event *e;
+       e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
+       if (!e)
+               return NULL;
+
+       entry->event = e;
+       entry->sample = ring_buffer_event_data(e);
+       entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+               / sizeof(entry->sample->data[0]);
+       entry->data = entry->sample->data;
+       return entry->sample;
+}
+
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+       return ring_buffer_entries_cpu(op_ring_buffer, cpu);
+}
+
+static int
+op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
+           int is_kernel, struct task_struct *task)
+{
+       struct op_entry entry;
+       struct op_sample *sample;
+       unsigned long flags;
+       int size;
+
+       flags = 0;
+
+       if (backtrace)
+               flags |= TRACE_BEGIN;
+
+       /* notice a switch from user->kernel or vice versa */
+       is_kernel = !!is_kernel;
+       if (cpu_buf->last_is_kernel != is_kernel) {
+               cpu_buf->last_is_kernel = is_kernel;
+               flags |= KERNEL_CTX_SWITCH;
+               if (is_kernel)
+                       flags |= IS_KERNEL;
+       }
+
+       /* notice a task switch */
+       if (cpu_buf->last_task != task) {
+               cpu_buf->last_task = task;
+               flags |= USER_CTX_SWITCH;
+       }
+
+       if (!flags)
+               /* nothing to do */
+               return 0;
+
+       if (flags & USER_CTX_SWITCH)
+               size = 1;
+       else
+               size = 0;
+
+       sample = op_cpu_buffer_write_reserve(&entry, size);
+       if (!sample)
+               return -ENOMEM;
+
+       sample->eip = ESCAPE_CODE;
+       sample->event = flags;
+
+       if (size)
+               op_cpu_buffer_add_data(&entry, (unsigned long)task);
+
+       op_cpu_buffer_write_commit(&entry);
+
+       return 0;
+}
+
+static inline int
+op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
+             unsigned long pc, unsigned long event)
+{
+       struct op_entry entry;
+       struct op_sample *sample;
+
+       sample = op_cpu_buffer_write_reserve(&entry, 0);
+       if (!sample)
+               return -ENOMEM;
+
+       sample->eip = pc;
+       sample->event = event;
+
+       return op_cpu_buffer_write_commit(&entry);
+}
+
+/*
+ * This must be safe from any context.
+ *
+ * is_kernel is needed because on some architectures you cannot
+ * tell if you are in kernel or user space simply by looking at
+ * pc. We tag this in the buffer by generating kernel enter/exit
+ * events whenever is_kernel changes
+ */
+static int
+log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
+          unsigned long backtrace, int is_kernel, unsigned long event,
+          struct task_struct *task)
+{
+       struct task_struct *tsk = task ? task : current;
+       cpu_buf->sample_received++;
+
+       if (pc == ESCAPE_CODE) {
+               cpu_buf->sample_invalid_eip++;
+               return 0;
+       }
+
+       if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
+               goto fail;
+
+       if (op_add_sample(cpu_buf, pc, event))
+               goto fail;
+
+       return 1;
+
+fail:
+       cpu_buf->sample_lost_overflow++;
+       return 0;
+}
+
+static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
+{
+       cpu_buf->tracing = 1;
+}
+
+static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
+{
+       cpu_buf->tracing = 0;
+}
+
+static inline void
+__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+                         unsigned long event, int is_kernel,
+                         struct task_struct *task)
+{
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+       unsigned long backtrace = oprofile_backtrace_depth;
+
+       /*
+        * if log_sample() fail we can't backtrace since we lost the
+        * source of this event
+        */
+       if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
+               /* failed */
+               return;
+
+       if (!backtrace)
+               return;
+
+       oprofile_begin_trace(cpu_buf);
+       oprofile_ops.backtrace(regs, backtrace);
+       oprofile_end_trace(cpu_buf);
+}
+
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+                               unsigned long event, int is_kernel,
+                               struct task_struct *task)
+{
+       __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
+}
+
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+                            unsigned long event, int is_kernel)
+{
+       __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
+}
+
+void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
+{
+       int is_kernel;
+       unsigned long pc;
+
+       if (likely(regs)) {
+               is_kernel = !user_mode(regs);
+               pc = profile_pc(regs);
+       } else {
+               is_kernel = 0;    /* This value will not be used */
+               pc = ESCAPE_CODE; /* as this causes an early return. */
+       }
+
+       __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
+}
+
+/*
+ * Add samples with data to the ring buffer.
+ *
+ * Use oprofile_add_data(&entry, val) to add data and
+ * oprofile_write_commit(&entry) to commit the sample.
+ */
+void
+oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
+                      unsigned long pc, int code, int size)
+{
+       struct op_sample *sample;
+       int is_kernel = !user_mode(regs);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+
+       cpu_buf->sample_received++;
+
+       /* no backtraces for samples with data */
+       if (op_add_code(cpu_buf, 0, is_kernel, current))
+               goto fail;
+
+       sample = op_cpu_buffer_write_reserve(entry, size + 2);
+       if (!sample)
+               goto fail;
+       sample->eip = ESCAPE_CODE;
+       sample->event = 0;              /* no flags */
+
+       op_cpu_buffer_add_data(entry, code);
+       op_cpu_buffer_add_data(entry, pc);
+
+       return;
+
+fail:
+       entry->event = NULL;
+       cpu_buf->sample_lost_overflow++;
+}
+
+int oprofile_add_data(struct op_entry *entry, unsigned long val)
+{
+       if (!entry->event)
+               return 0;
+       return op_cpu_buffer_add_data(entry, val);
+}
+
+int oprofile_add_data64(struct op_entry *entry, u64 val)
+{
+       if (!entry->event)
+               return 0;
+       if (op_cpu_buffer_get_size(entry) < 2)
+               /*
+                * the function returns 0 to indicate a too small
+                * buffer, even if there is some space left
+                */
+               return 0;
+       if (!op_cpu_buffer_add_data(entry, (u32)val))
+               return 0;
+       return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
+}
+
+int oprofile_write_commit(struct op_entry *entry)
+{
+       if (!entry->event)
+               return -EINVAL;
+       return op_cpu_buffer_write_commit(entry);
+}
+
+void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
+{
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+       log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
+}
+
+void oprofile_add_trace(unsigned long pc)
+{
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+
+       if (!cpu_buf->tracing)
+               return;
+
+       /*
+        * broken frame can give an eip with the same value as an
+        * escape code, abort the trace if we get it
+        */
+       if (pc == ESCAPE_CODE)
+               goto fail;
+
+       if (op_add_sample(cpu_buf, pc, 0))
+               goto fail;
+
+       return;
+fail:
+       cpu_buf->tracing = 0;
+       cpu_buf->backtrace_aborted++;
+       return;
+}
+
+/*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+ *
+ * By using schedule_delayed_work_on and then schedule_delayed_work
+ * we guarantee this will stay on the correct cpu
+ */
+static void wq_sync_buffer(struct work_struct *work)
+{
+       struct oprofile_cpu_buffer *b =
+               container_of(work, struct oprofile_cpu_buffer, work.work);
+       if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
+               cancel_delayed_work(&b->work);
+               return;
+       }
+       sync_buffer(b->cpu);
+
+       /* don't re-add the work if we're shutting down */
+       if (work_enabled)
+               schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
+}
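As a usage sketch (not part of this commit), this is how a sampling interrupt handler would be expected to feed these per-cpu buffers; my_overflow_handler, my_log_sample_with_data and MY_EXTRA_CODE are hypothetical names:

    /* hypothetical PMU-overflow path: one pc/event pair per interrupt */
    static void my_overflow_handler(struct pt_regs *regs, unsigned long event)
    {
            oprofile_add_sample(regs, event);  /* kernel/user inferred from regs */
    }

    /* hypothetical sample carrying two extra words of payload */
    static void my_log_sample_with_data(struct pt_regs *regs, unsigned long pc,
                                        u64 extra)
    {
            struct op_entry entry;

            oprofile_write_reserve(&entry, regs, pc, MY_EXTRA_CODE, 2);
            oprofile_add_data64(&entry, extra);  /* uses both reserved slots */
            oprofile_write_commit(&entry);       /* fails cleanly if reserve failed */
    }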
diff --git a/kern/src/oprofile/cpu_buffer.h b/kern/src/oprofile/cpu_buffer.h
new file mode 100644
index 0000000..e1d097e
--- /dev/null
@@ -0,0 +1,121 @@
+/**
+ * @file cpu_buffer.h
+ *
+ * @remark Copyright 2002-2009 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#ifndef OPROFILE_CPU_BUFFER_H
+#define OPROFILE_CPU_BUFFER_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/ring_buffer.h>
+
+struct task_struct;
+
+int alloc_cpu_buffers(void);
+void free_cpu_buffers(void);
+
+void start_cpu_work(void);
+void end_cpu_work(void);
+void flush_cpu_work(void);
+
+/* CPU buffer is composed of such entries (which are
+ * also used for context switch notes)
+ */
+struct op_sample {
+       unsigned long eip;
+       unsigned long event;
+       unsigned long data[0];
+};
+
+struct op_entry;
+
+struct oprofile_cpu_buffer {
+       unsigned long buffer_size;
+       struct task_struct *last_task;
+       int last_is_kernel;
+       int tracing;
+       unsigned long sample_received;
+       unsigned long sample_lost_overflow;
+       unsigned long backtrace_aborted;
+       unsigned long sample_invalid_eip;
+       int cpu;
+       struct delayed_work work;
+};
+
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
+
+/*
+ * Resets the cpu buffer to a sane state.
+ *
+ * reset these to invalid values; the next sample collected will
+ * populate the buffer with proper values to initialize the buffer
+ */
+static inline void op_cpu_buffer_reset(int cpu)
+{
+       struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
+
+       cpu_buf->last_is_kernel = -1;
+       cpu_buf->last_task = NULL;
+}
+
+/*
+ * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
+ * called only if op_cpu_buffer_write_reserve() did not return NULL or
+ * entry->event != NULL, otherwise entry->size or entry->event will be
+ * used uninitialized.
+ */
+
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
+{
+       if (!entry->size)
+               return 0;
+       *entry->data = val;
+       entry->size--;
+       entry->data++;
+       return entry->size;
+}
+
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
+{
+       return entry->size;
+}
+
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
+{
+       int size = entry->size;
+       if (!size)
+               return 0;
+       *val = *entry->data;
+       entry->size--;
+       entry->data++;
+       return size;
+}
+
+/* extra data flags */
+#define KERNEL_CTX_SWITCH      (1UL << 0)
+#define IS_KERNEL              (1UL << 1)
+#define TRACE_BEGIN            (1UL << 2)
+#define USER_CTX_SWITCH                (1UL << 3)
+
+#endif /* OPROFILE_CPU_BUFFER_H */
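A condensed sketch of the reserve/add/commit contract on the producer side (the matching consumer is the op_cpu_buffer_read_entry()/op_cpu_buffer_get_data() loop in sync_buffer()); pc, event and extra stand in for caller-supplied values:

    struct op_entry entry;
    struct op_sample *sample;

    /* reserve the sample header plus room for one data word */
    sample = op_cpu_buffer_write_reserve(&entry, 1);
    if (!sample)
            return;                              /* ring buffer full: drop it */
    sample->eip = pc;
    sample->event = event;
    op_cpu_buffer_add_data(&entry, extra);       /* optional payload word */
    op_cpu_buffer_write_commit(&entry);          /* publish to the reader */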
diff --git a/kern/src/oprofile/event_buffer.c b/kern/src/oprofile/event_buffer.c
new file mode 100644
index 0000000..39b1417
--- /dev/null
@@ -0,0 +1,195 @@
+/**
+ * @file event_buffer.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * This is the global event buffer that the user-space
+ * daemon reads from. The event buffer is an untyped array
+ * of unsigned longs. Entries are prefixed by the
+ * escape value ESCAPE_CODE followed by an identifying code.
+ */
+
+#include "oprof.h"
+#include "event_buffer.h"
+#include "oprofile_stats.h"
+
+DEFINE_MUTEX(buffer_mutex);
+
+static unsigned long buffer_opened;
+static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
+static unsigned long *event_buffer;
+static unsigned long buffer_size;
+static unsigned long buffer_watershed;
+static size_t buffer_pos;
+/* atomic_t because wait_event checks it outside of buffer_mutex */
+static atomic_t buffer_ready = ATOMIC_INIT(0);
+
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
+ */
+void add_event_entry(unsigned long value)
+{
+       /*
+        * This shouldn't happen since all workqueues or handlers are
+        * canceled or flushed before the event buffer is freed.
+        */
+       if (!event_buffer) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       if (buffer_pos == buffer_size) {
+               atomic_inc(&oprofile_stats.event_lost_overflow);
+               return;
+       }
+
+       event_buffer[buffer_pos] = value;
+       if (++buffer_pos == buffer_size - buffer_watershed) {
+               atomic_set(&buffer_ready, 1);
+               wake_up(&buffer_wait);
+       }
+}
+
+
+/* Wake up the waiting process if any. This happens
+ * on "echo 0 >/dev/oprofile/enable" so the daemon
+ * processes the data remaining in the event buffer.
+ */
+void wake_up_buffer_waiter(void)
+{
+       mutex_lock(&buffer_mutex);
+       atomic_set(&buffer_ready, 1);
+       wake_up(&buffer_wait);
+       mutex_unlock(&buffer_mutex);
+}
+
+
+int alloc_event_buffer(void)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+       buffer_size = oprofile_buffer_size;
+       buffer_watershed = oprofile_buffer_watershed;
+       raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+
+       if (buffer_watershed >= buffer_size)
+               return -EINVAL;
+
+       buffer_pos = 0;
+       event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
+       if (!event_buffer)
+               return -ENOMEM;
+
+       return 0;
+}
+
+
+void free_event_buffer(void)
+{
+       mutex_lock(&buffer_mutex);
+       vfree(event_buffer);
+       buffer_pos = 0;
+       event_buffer = NULL;
+       mutex_unlock(&buffer_mutex);
+}
+
+
+static int event_buffer_open(struct inode *inode, struct file *file)
+{
+       int err = -EPERM;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (test_and_set_bit_lock(0, &buffer_opened))
+               return -EBUSY;
+
+       /* Register as a user of dcookies
+        * to ensure they persist for the lifetime of
+        * the open event file
+        */
+       err = -EINVAL;
+       file->private_data = dcookie_register();
+       if (!file->private_data)
+               goto out;
+
+       if ((err = oprofile_setup()))
+               goto fail;
+
+       /* NB: the actual start happens from userspace
+        * echo 1 >/dev/oprofile/enable
+        */
+
+       return nonseekable_open(inode, file);
+
+fail:
+       dcookie_unregister(file->private_data);
+out:
+       __clear_bit_unlock(0, &buffer_opened);
+       return err;
+}
+
+
+static int event_buffer_release(struct inode *inode, struct file *file)
+{
+       oprofile_stop();
+       oprofile_shutdown();
+       dcookie_unregister(file->private_data);
+       buffer_pos = 0;
+       atomic_set(&buffer_ready, 0);
+       __clear_bit_unlock(0, &buffer_opened);
+       return 0;
+}
+
+
+static ssize_t event_buffer_read(struct file *file, char __user *buf,
+                                size_t count, loff_t *offset)
+{
+       int retval = -EINVAL;
+       size_t const max = buffer_size * sizeof(unsigned long);
+
+       /* handling partial reads is more trouble than it's worth */
+       if (count != max || *offset)
+               return -EINVAL;
+
+       wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
+
+       if (signal_pending(current))
+               return -EINTR;
+
+       /* can't currently happen */
+       if (!atomic_read(&buffer_ready))
+               return -EAGAIN;
+
+       mutex_lock(&buffer_mutex);
+
+       /* May happen if the buffer is freed during pending reads. */
+       if (!event_buffer) {
+               retval = -EINTR;
+               goto out;
+       }
+
+       atomic_set(&buffer_ready, 0);
+
+       retval = -EFAULT;
+
+       count = buffer_pos * sizeof(unsigned long);
+
+       if (copy_to_user(buf, event_buffer, count))
+               goto out;
+
+       retval = count;
+       buffer_pos = 0;
+
+out:
+       mutex_unlock(&buffer_mutex);
+       return retval;
+}
+
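
event_buffer_read() above only supports whole-buffer reads at offset 0; anything else returns -EINVAL. A rough userspace sketch of the daemon side of that handshake, assuming the filesystem is mounted at /dev/oprofile as the comments in this file suggest:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned long bufsize;
            unsigned long *events;
            ssize_t n;
            int fd;

            /* read the configured size, then drain the buffer in one call */
            FILE *f = fopen("/dev/oprofile/buffer_size", "r");
            fscanf(f, "%lu", &bufsize);
            fclose(f);

            events = malloc(bufsize * sizeof(unsigned long));
            fd = open("/dev/oprofile/buffer", O_RDONLY);

            /* count must equal buffer_size words at offset 0; the call blocks
             * until the watershed is crossed or the waiter is woken explicitly */
            n = read(fd, events, bufsize * sizeof(unsigned long));
            /* n = buffer_pos * sizeof(unsigned long) bytes of valid entries */

            close(fd);
            free(events);
            return 0;
    }
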
diff --git a/kern/src/oprofile/event_buffer.h b/kern/src/oprofile/event_buffer.h
new file mode 100644 (file)
index 0000000..5042a68
--- /dev/null
@@ -0,0 +1,35 @@
+/**
+ * @file event_buffer.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef EVENT_BUFFER_H
+#define EVENT_BUFFER_H
+
+int alloc_event_buffer(void);
+
+void free_event_buffer(void);
+
+/**
+ * Add data to the event buffer.
+ * The data passed is free-form, but typically consists of
+ * file offsets, dcookies, context information, and ESCAPE codes.
+ */
+void add_event_entry(unsigned long data);
+
+/* wake up the process sleeping on the event file */
+void wake_up_buffer_waiter(void);
+
+#define INVALID_COOKIE ~0UL
+#define NO_COOKIE 0UL
+
+/* mutex between sync_cpu_buffers() and the
+ * file reading code.
+ */
+extern struct mutex buffer_mutex;
+
+#endif /* EVENT_BUFFER_H */
diff --git a/kern/src/oprofile/nmi_timer_int.c b/kern/src/oprofile/nmi_timer_int.c
new file mode 100644 (file)
index 0000000..76f1c93
--- /dev/null
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+       .type           = PERF_TYPE_HARDWARE,
+       .config         = PERF_COUNT_HW_CPU_CYCLES,
+       .size           = sizeof(struct perf_event_attr),
+       .pinned         = 1,
+       .disabled       = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+                              struct perf_sample_data *data,
+                              struct pt_regs *regs)
+{
+       event->hw.interrupts = 0;       /* don't throttle interrupts */
+       oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+       struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+       if (!event) {
+               event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+                                                        nmi_timer_callback, NULL);
+               if (IS_ERR(event))
+                       return PTR_ERR(event);
+               per_cpu(nmi_timer_events, cpu) = event;
+       }
+
+       if (event && ctr_running)
+               perf_event_enable(event);
+
+       return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+       struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+       if (event && ctr_running)
+               perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+                                 void *data)
+{
+       int cpu = (unsigned long)data;
+       switch (action) {
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE:
+               nmi_timer_start_cpu(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               nmi_timer_stop_cpu(cpu);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+       .notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       ctr_running = 1;
+       for_each_online_cpu(cpu)
+               nmi_timer_start_cpu(cpu);
+       put_online_cpus();
+
+       return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               nmi_timer_stop_cpu(cpu);
+       ctr_running = 0;
+       put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+       struct perf_event *event;
+       int cpu;
+
+       get_online_cpus();
+       unregister_cpu_notifier(&nmi_timer_cpu_nb);
+       for_each_possible_cpu(cpu) {
+               event = per_cpu(nmi_timer_events, cpu);
+               if (!event)
+                       continue;
+               perf_event_disable(event);
+               per_cpu(nmi_timer_events, cpu) = NULL;
+               perf_event_release_kernel(event);
+       }
+
+       put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+       int cpu, err;
+       u64 period;
+
+       /* clock cycles per tick: */
+       period = (u64)cpu_khz * 1000;
+       do_div(period, HZ);
+       nmi_timer_attr.sample_period = period;
+
+       get_online_cpus();
+       err = register_cpu_notifier(&nmi_timer_cpu_nb);
+       if (err)
+               goto out;
+       /* can't attach events to offline cpus: */
+       for_each_online_cpu(cpu) {
+               err = nmi_timer_start_cpu(cpu);
+               if (err)
+                       break;
+       }
+       if (err)
+               nmi_timer_shutdown();
+out:
+       put_online_cpus();
+       return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+       int err = 0;
+
+       err = nmi_timer_setup();
+       if (err)
+               return err;
+       nmi_timer_shutdown();           /* only check, don't alloc */
+
+       ops->create_files       = NULL;
+       ops->setup              = nmi_timer_setup;
+       ops->shutdown           = nmi_timer_shutdown;
+       ops->start              = nmi_timer_start;
+       ops->stop               = nmi_timer_stop;
+       ops->cpu_type           = "timer";
+
+       printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+       return 0;
+}
+
+#endif
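
The only arithmetic here is the sample period: nmi_timer_setup() converts one scheduler tick into cycles so that the pinned cycle counter overflows (and NMIs) roughly once per tick. A worked example, assuming a 2 GHz cpu and HZ = 100:

    u64 period;

    /* cpu_khz = 2000000 (2 GHz), HZ = 100 */
    period = (u64)cpu_khz * 1000;   /* 2,000,000,000 cycles per second */
    do_div(period, HZ);             /* 20,000,000 cycles between NMI samples */
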
diff --git a/kern/src/oprofile/oprof.c b/kern/src/oprofile/oprof.c
new file mode 100644 (file)
index 0000000..927d97e
--- /dev/null
@@ -0,0 +1,283 @@
+/**
+ * @file oprof.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <vfs.h>
+#include <kfs.h>
+#include <slab.h>
+#include <kmalloc.h>
+#include <kref.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+#include <error.h>
+#include <cpio.h>
+#include <pmap.h>
+#include <smp.h>
+#include <ip.h>
+
+#include "oprof.h"
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+#include "oprofile_stats.h"
+
+struct oprofile_operations oprofile_ops;
+
+unsigned long oprofile_started;
+unsigned long oprofile_backtrace_depth;
+static unsigned long is_setup;
+static DEFINE_MUTEX(start_mutex);
+
+/* timer
+   0 - use performance monitoring hardware if available
+   1 - use the timer int mechanism regardless
+ */
+static int timer = 0;
+
+int oprofile_setup(void)
+{
+       int err;
+
+       mutex_lock(&start_mutex);
+
+       if ((err = alloc_cpu_buffers()))
+               goto out;
+
+       if ((err = alloc_event_buffer()))
+               goto out1;
+
+       if (oprofile_ops.setup && (err = oprofile_ops.setup()))
+               goto out2;
+
+       /* Note even though this starts part of the
+        * profiling overhead, it's necessary to prevent
+        * us missing task deaths and eventually oopsing
+        * when trying to process the event buffer.
+        */
+       if (oprofile_ops.sync_start) {
+               int sync_ret = oprofile_ops.sync_start();
+               switch (sync_ret) {
+               case 0:
+                       goto post_sync;
+               case 1:
+                       goto do_generic;
+               case -1:
+                       goto out3;
+               default:
+                       goto out3;
+               }
+       }
+do_generic:
+       if ((err = sync_start()))
+               goto out3;
+
+post_sync:
+       is_setup = 1;
+       mutex_unlock(&start_mutex);
+       return 0;
+
+out3:
+       if (oprofile_ops.shutdown)
+               oprofile_ops.shutdown();
+out2:
+       free_event_buffer();
+out1:
+       free_cpu_buffers();
+out:
+       mutex_unlock(&start_mutex);
+       return err;
+}
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void switch_worker(struct work_struct *work);
+
+static void start_switch_worker(void)
+{
+//     if (oprofile_ops.switch_events)
+//             schedule_delayed_work(&switch_work, oprofile_time_slice);
+}
+
+static void stop_switch_worker(void)
+{
+//     cancel_delayed_work_sync(&switch_work);
+}
+
+static void switch_worker(struct work_struct *work)
+{
+       if (oprofile_ops.switch_events())
+               return;
+
+       atomic_inc(&oprofile_stats.multiplex_counter);
+       start_switch_worker();
+}
+
+/* User inputs in ms, converts to jiffies */
+int oprofile_set_timeout(unsigned long val_msec)
+{
+       int err = 0;
+       unsigned long time_slice;
+
+       mutex_lock(&start_mutex);
+
+       if (oprofile_started) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (!oprofile_ops.switch_events) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       time_slice = msecs_to_jiffies(val_msec);
+       if (time_slice == MAX_JIFFY_OFFSET) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       oprofile_time_slice = time_slice;
+
+out:
+       mutex_unlock(&start_mutex);
+       return err;
+
+}
+
+#else
+
+static inline void start_switch_worker(void) { }
+static inline void stop_switch_worker(void) { }
+
+#endif
+
+/* Actually start profiling (echo 1>/dev/oprofile/enable) */
+int oprofile_start(void)
+{
+       int err = -EINVAL;
+
+       mutex_lock(&start_mutex);
+
+       if (!is_setup)
+               goto out;
+
+       err = 0;
+
+       if (oprofile_started)
+               goto out;
+
+       oprofile_reset_stats();
+
+       if ((err = oprofile_ops.start()))
+               goto out;
+
+       start_switch_worker();
+
+       oprofile_started = 1;
+out:
+       mutex_unlock(&start_mutex);
+       return err;
+}
+
+
+/* echo 0>/dev/oprofile/enable */
+void oprofile_stop(void)
+{
+       mutex_lock(&start_mutex);
+       if (!oprofile_started)
+               goto out;
+       oprofile_ops.stop();
+       oprofile_started = 0;
+
+       stop_switch_worker();
+
+       /* wake up the daemon to read what remains */
+       wake_up_buffer_waiter();
+out:
+       mutex_unlock(&start_mutex);
+}
+
+
+void oprofile_shutdown(void)
+{
+       mutex_lock(&start_mutex);
+       if (oprofile_ops.sync_stop) {
+               int sync_ret = oprofile_ops.sync_stop();
+               switch (sync_ret) {
+               case 0:
+                       goto post_sync;
+               case 1:
+                       goto do_generic;
+               default:
+                       goto post_sync;
+               }
+       }
+do_generic:
+       sync_stop();
+post_sync:
+       if (oprofile_ops.shutdown)
+               oprofile_ops.shutdown();
+       is_setup = 0;
+       free_event_buffer();
+       free_cpu_buffers();
+       mutex_unlock(&start_mutex);
+}
+
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
+{
+       int err = -EBUSY;
+
+       mutex_lock(&start_mutex);
+       if (!oprofile_started) {
+               *addr = val;
+               err = 0;
+       }
+       mutex_unlock(&start_mutex);
+
+       return err;
+}
+
+static int timer_mode;
+
+static int __init oprofile_init(void)
+{
+       int err;
+
+       /* always init architecture to setup backtrace support */
+       timer_mode = 0;
+       err = oprofile_arch_init(&oprofile_ops);
+       if (!err) {
+               if (!timer && !oprofilefs_register())
+                       return 0;
+               oprofile_arch_exit();
+       }
+
+       /* setup timer mode: */
+       timer_mode = 1;
+       /* no nmi timer mode if oprofile.timer is set */
+       if (timer || op_nmi_timer_init(&oprofile_ops)) {
+               err = oprofile_timer_init(&oprofile_ops);
+               if (err)
+                       return err;
+       }
+
+       return oprofilefs_register();
+}
+
+
+static void __exit oprofile_exit(void)
+{
+       if (!timer_mode)
+               oprofile_arch_exit();
+}
+
+
+//MODULE_LICENSE("GPL");
+//MODULE_AUTHOR("John Levon <levon@movementarian.org>");
+//MODULE_DESCRIPTION("OProfile system profiler");
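
Tying oprof.c back to the event buffer code above, the lifecycle driven from the control files is roughly the following call order (a sketch of existing entry points, not new code):

    /* daemon opens the event buffer file  -> event_buffer_open() */
    err = oprofile_setup();      /* alloc cpu + event buffers, sync_start() */

    /* echo 1 > /dev/oprofile/enable       -> enable_write() */
    err = oprofile_start();      /* oprofile_ops.start(), sampling begins */

    /* echo 0 > /dev/oprofile/enable */
    oprofile_stop();             /* oprofile_ops.stop(), wakes the daemon */

    /* daemon closes the buffer file       -> event_buffer_release() */
    oprofile_stop();
    oprofile_shutdown();         /* sync_stop(), free buffers */
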
diff --git a/kern/src/oprofile/oprof.h b/kern/src/oprofile/oprof.h
new file mode 100644 (file)
index 0000000..1006fa7
--- /dev/null
@@ -0,0 +1,36 @@
+/**
+ * @file oprof.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROF_H
+#define OPROF_H
+
+int oprofile_setup(void);
+void oprofile_shutdown(void);
+
+int oprofile_start(void);
+void oprofile_stop(void);
+
+struct oprofile_operations;
+
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
+extern unsigned long oprofile_time_slice;
+
+extern struct oprofile_operations oprofile_ops;
+extern unsigned long oprofile_started;
+extern unsigned long oprofile_backtrace_depth;
+
+int oprofile_timer_init(struct oprofile_operations *ops);
+int op_nmi_timer_init(struct oprofile_operations *ops);
+
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
+int oprofile_set_timeout(unsigned long time);
+
+#endif /* OPROF_H */
diff --git a/kern/src/oprofile/oprofile_files.c b/kern/src/oprofile/oprofile_files.c
new file mode 100644 (file)
index 0000000..84a208d
--- /dev/null
@@ -0,0 +1,201 @@
+/**
+ * @file oprofile_files.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/fs.h>
+#include <linux/oprofile.h>
+#include <linux/jiffies.h>
+
+#include "event_buffer.h"
+#include "oprofile_stats.h"
+#include "oprof.h"
+
+#define BUFFER_SIZE_DEFAULT            131072
+#define CPU_BUFFER_SIZE_DEFAULT                8192
+#define BUFFER_WATERSHED_DEFAULT       32768   /* FIXME: tune */
+#define TIME_SLICE_DEFAULT             1
+
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
+unsigned long oprofile_time_slice;
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static ssize_t timeout_read(struct file *file, char __user *buf,
+               size_t count, loff_t *offset)
+{
+       return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+                                       buf, count, offset);
+}
+
+
+static ssize_t timeout_write(struct file *file, char const __user *buf,
+               size_t count, loff_t *offset)
+{
+       unsigned long val;
+       int retval;
+
+       if (*offset)
+               return -EINVAL;
+
+       retval = oprofilefs_ulong_from_user(&val, buf, count);
+       if (retval <= 0)
+               return retval;
+
+       retval = oprofile_set_timeout(val);
+
+       if (retval)
+               return retval;
+       return count;
+}
+
+
+static const struct file_operations timeout_fops = {
+       .read           = timeout_read,
+       .write          = timeout_write,
+       .llseek         = default_llseek,
+};
+
+#endif
+
+
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+                                       offset);
+}
+
+
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+       unsigned long val;
+       int retval;
+
+       if (*offset)
+               return -EINVAL;
+
+       if (!oprofile_ops.backtrace)
+               return -EINVAL;
+
+       retval = oprofilefs_ulong_from_user(&val, buf, count);
+       if (retval <= 0)
+               return retval;
+
+       retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
+       if (retval)
+               return retval;
+
+       return count;
+}
+
+
+static const struct file_operations depth_fops = {
+       .read           = depth_read,
+       .write          = depth_write,
+       .llseek         = default_llseek,
+};
+
+
+static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
+}
+
+
+static const struct file_operations pointer_size_fops = {
+       .read           = pointer_size_read,
+       .llseek         = default_llseek,
+};
+
+
+static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
+}
+
+
+static const struct file_operations cpu_type_fops = {
+       .read           = cpu_type_read,
+       .llseek         = default_llseek,
+};
+
+
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
+}
+
+
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+       unsigned long val;
+       int retval;
+
+       if (*offset)
+               return -EINVAL;
+
+       retval = oprofilefs_ulong_from_user(&val, buf, count);
+       if (retval <= 0)
+               return retval;
+
+       retval = 0;
+       if (val)
+               retval = oprofile_start();
+       else
+               oprofile_stop();
+
+       if (retval)
+               return retval;
+       return count;
+}
+
+
+static const struct file_operations enable_fops = {
+       .read           = enable_read,
+       .write          = enable_write,
+       .llseek         = default_llseek,
+};
+
+
+static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+       wake_up_buffer_waiter();
+       return count;
+}
+
+
+static const struct file_operations dump_fops = {
+       .write          = dump_write,
+       .llseek         = noop_llseek,
+};
+
+void oprofile_create_files(struct super_block *sb, struct dentry *root)
+{
+       /* reinitialize default values */
+       oprofile_buffer_size =          BUFFER_SIZE_DEFAULT;
+       oprofile_cpu_buffer_size =      CPU_BUFFER_SIZE_DEFAULT;
+       oprofile_buffer_watershed =     BUFFER_WATERSHED_DEFAULT;
+       oprofile_time_slice =           msecs_to_jiffies(TIME_SLICE_DEFAULT);
+
+       oprofilefs_create_file(sb, root, "enable", &enable_fops);
+       oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
+       oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+       oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+       oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+       oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
+       oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
+       oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
+       oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+       oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
+#endif
+       oprofile_create_stats_files(sb, root);
+       if (oprofile_ops.create_files)
+               oprofile_ops.create_files(sb, root);
+}
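
Because enable_write() rejects writes at a nonzero offset, the simplest way to toggle profiling from userspace is a fresh open per write. A minimal sketch, assuming the oprofilefs mount point /dev/oprofile used in the comments above:

    #include <fcntl.h>
    #include <unistd.h>

    static void set_enable(const char *val)
    {
            int fd = open("/dev/oprofile/enable", O_WRONLY);

            write(fd, val, 1);  /* "1" -> oprofile_start(), "0" -> oprofile_stop() */
            close(fd);
    }

    /* set_enable("1");  ... run the workload ...  set_enable("0"); */
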
diff --git a/kern/src/oprofile/oprofile_perf.c b/kern/src/oprofile/oprofile_perf.c
new file mode 100644 (file)
index 0000000..877590d
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
+ *
+ * Perf-events backend for OProfile.
+ */
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+       unsigned long count;
+       unsigned long enabled;
+       unsigned long event;
+       unsigned long unit_mask;
+       unsigned long kernel;
+       unsigned long user;
+       struct perf_event_attr attr;
+};
+
+static int oprofile_perf_enabled;
+static DEFINE_MUTEX(oprofile_perf_mutex);
+
+static struct op_counter_config *counter_config;
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
+static int num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event,
+                       struct perf_sample_data *data, struct pt_regs *regs)
+{
+       int id;
+       u32 cpu = smp_processor_id();
+
+       for (id = 0; id < num_counters; ++id)
+               if (per_cpu(perf_events, cpu)[id] == event)
+                       break;
+
+       if (id != num_counters)
+               oprofile_add_sample(regs, id);
+       else
+               pr_warning("oprofile: ignoring spurious overflow "
+                               "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+       int i;
+       u32 size = sizeof(struct perf_event_attr);
+       struct perf_event_attr *attr;
+
+       for (i = 0; i < num_counters; ++i) {
+               attr = &counter_config[i].attr;
+               memset(attr, 0, size);
+               attr->type              = PERF_TYPE_RAW;
+               attr->size              = size;
+               attr->config            = counter_config[i].event;
+               attr->sample_period     = counter_config[i].count;
+               attr->pinned            = 1;
+       }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+       struct perf_event *pevent;
+
+       if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
+               return 0;
+
+       pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+                                                 cpu, NULL,
+                                                 op_overflow_handler, NULL);
+
+       if (IS_ERR(pevent))
+               return PTR_ERR(pevent);
+
+       if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+               perf_event_release_kernel(pevent);
+               pr_warning("oprofile: failed to enable event %d "
+                               "on CPU %d\n", event, cpu);
+               return -EBUSY;
+       }
+
+       per_cpu(perf_events, cpu)[event] = pevent;
+
+       return 0;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+       struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
+
+       if (pevent) {
+               perf_event_release_kernel(pevent);
+               per_cpu(perf_events, cpu)[event] = NULL;
+       }
+}
+
+/*
+ * Called by oprofile_perf_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+       int cpu, event, ret = 0;
+
+       for_each_online_cpu(cpu) {
+               for (event = 0; event < num_counters; ++event) {
+                       ret = op_create_counter(cpu, event);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Called by oprofile_perf_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+       int cpu, event;
+
+       for_each_online_cpu(cpu)
+               for (event = 0; event < num_counters; ++event)
+                       op_destroy_counter(cpu, event);
+}
+
+static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+{
+       unsigned int i;
+
+       for (i = 0; i < num_counters; i++) {
+               struct dentry *dir;
+               char buf[4];
+
+               snprintf(buf, sizeof buf, "%d", i);
+               dir = oprofilefs_mkdir(sb, root, buf);
+               oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+               oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+               oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+               oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+               oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+               oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+       }
+
+       return 0;
+}
+
+static int oprofile_perf_setup(void)
+{
+       raw_spin_lock(&oprofilefs_lock);
+       op_perf_setup();
+       raw_spin_unlock(&oprofilefs_lock);
+       return 0;
+}
+
+static int oprofile_perf_start(void)
+{
+       int ret = -EBUSY;
+
+       mutex_lock(&oprofile_perf_mutex);
+       if (!oprofile_perf_enabled) {
+               ret = 0;
+               op_perf_start();
+               oprofile_perf_enabled = 1;
+       }
+       mutex_unlock(&oprofile_perf_mutex);
+       return ret;
+}
+
+static void oprofile_perf_stop(void)
+{
+       mutex_lock(&oprofile_perf_mutex);
+       if (oprofile_perf_enabled)
+               op_perf_stop();
+       oprofile_perf_enabled = 0;
+       mutex_unlock(&oprofile_perf_mutex);
+}
+
+void oprofile_perf_exit(void)
+{
+       int cpu, id;
+       struct perf_event *event;
+
+       for_each_possible_cpu(cpu) {
+               for (id = 0; id < num_counters; ++id) {
+                       event = per_cpu(perf_events, cpu)[id];
+                       if (event)
+                               perf_event_release_kernel(event);
+               }
+
+               kfree(per_cpu(perf_events, cpu));
+       }
+
+       kfree(counter_config);
+       exit_driverfs();
+}
+
+int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+       int cpu, ret = 0;
+
+       ret = init_driverfs();
+       if (ret)
+               return ret;
+
+       num_counters = perf_num_counters();
+       if (num_counters <= 0) {
+               pr_info("oprofile: no performance counters\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       counter_config = kcalloc(num_counters,
+                       sizeof(struct op_counter_config), GFP_KERNEL);
+
+       if (!counter_config) {
+               pr_info("oprofile: failed to allocate %d "
+                               "counters\n", num_counters);
+               ret = -ENOMEM;
+               num_counters = 0;
+               goto out;
+       }
+
+       for_each_possible_cpu(cpu) {
+               per_cpu(perf_events, cpu) = kcalloc(num_counters,
+                               sizeof(struct perf_event *), GFP_KERNEL);
+               if (!per_cpu(perf_events, cpu)) {
+                       pr_info("oprofile: failed to allocate %d perf events "
+                                       "for cpu %d\n", num_counters, cpu);
+                       ret = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       ops->create_files       = oprofile_perf_create_files;
+       ops->setup              = oprofile_perf_setup;
+       ops->start              = oprofile_perf_start;
+       ops->stop               = oprofile_perf_stop;
+       ops->shutdown           = oprofile_perf_stop;
+       ops->cpu_type           = op_name_from_perf_id();
+
+       if (!ops->cpu_type)
+               ret = -ENODEV;
+       else
+               pr_info("oprofile: using %s\n", ops->cpu_type);
+
+out:
+       if (ret)
+               oprofile_perf_exit();
+
+       return ret;
+}
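
When the perf backend is in use, oprofile_perf_create_files() exposes each counter as a numbered directory of ulong files. Configuring counter 0 before enabling profiling then amounts to the sketch below (set_ul() is a hypothetical helper; the event code and period are only example numbers):

    #include <stdio.h>

    static void set_ul(const char *path, unsigned long val)
    {
            FILE *f = fopen(path, "w");

            fprintf(f, "%lu\n", val);
            fclose(f);
    }

    int main(void)
    {
            set_ul("/dev/oprofile/0/event", 60);        /* raw event code (example) */
            set_ul("/dev/oprofile/0/count", 100000);    /* sample period (example) */
            set_ul("/dev/oprofile/0/enabled", 1);
            return 0;
    }
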
diff --git a/kern/src/oprofile/oprofile_stats.c b/kern/src/oprofile/oprofile_stats.c
new file mode 100644 (file)
index 0000000..917d28e
--- /dev/null
@@ -0,0 +1,84 @@
+/**
+ * @file oprofile_stats.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#include <linux/oprofile.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/threads.h>
+
+#include "oprofile_stats.h"
+#include "cpu_buffer.h"
+
+struct oprofile_stat_struct oprofile_stats;
+
+void oprofile_reset_stats(void)
+{
+       struct oprofile_cpu_buffer *cpu_buf;
+       int i;
+
+       for_each_possible_cpu(i) {
+               cpu_buf = &per_cpu(op_cpu_buffer, i);
+               cpu_buf->sample_received = 0;
+               cpu_buf->sample_lost_overflow = 0;
+               cpu_buf->backtrace_aborted = 0;
+               cpu_buf->sample_invalid_eip = 0;
+       }
+
+       atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+       atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+       atomic_set(&oprofile_stats.event_lost_overflow, 0);
+       atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+       atomic_set(&oprofile_stats.multiplex_counter, 0);
+}
+
+
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
+{
+       struct oprofile_cpu_buffer *cpu_buf;
+       struct dentry *cpudir;
+       struct dentry *dir;
+       char buf[10];
+       int i;
+
+       dir = oprofilefs_mkdir(sb, root, "stats");
+       if (!dir)
+               return;
+
+       for_each_possible_cpu(i) {
+               cpu_buf = &per_cpu(op_cpu_buffer, i);
+               snprintf(buf, 10, "cpu%d", i);
+               cpudir = oprofilefs_mkdir(sb, dir, buf);
+
+               /* Strictly speaking access to these ulongs is racy,
+                * but we can't simply lock them, and they are
+                * informational only.
+                */
+               oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+                       &cpu_buf->sample_received);
+               oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+                       &cpu_buf->sample_lost_overflow);
+               oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
+                       &cpu_buf->backtrace_aborted);
+               oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
+                       &cpu_buf->sample_invalid_eip);
+       }
+
+       oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
+               &oprofile_stats.sample_lost_no_mm);
+       oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+               &oprofile_stats.sample_lost_no_mapping);
+       oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+               &oprofile_stats.event_lost_overflow);
+       oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
+               &oprofile_stats.bt_lost_no_mapping);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+       oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
+               &oprofile_stats.multiplex_counter);
+#endif
+}
diff --git a/kern/src/oprofile/oprofile_stats.h b/kern/src/oprofile/oprofile_stats.h
new file mode 100644 (file)
index 0000000..38b6fc0
--- /dev/null
@@ -0,0 +1,34 @@
+/**
+ * @file oprofile_stats.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#ifndef OPROFILE_STATS_H
+#define OPROFILE_STATS_H
+
+#include <linux/atomic.h>
+
+struct oprofile_stat_struct {
+       atomic_t sample_lost_no_mm;
+       atomic_t sample_lost_no_mapping;
+       atomic_t bt_lost_no_mapping;
+       atomic_t event_lost_overflow;
+       atomic_t multiplex_counter;
+};
+
+extern struct oprofile_stat_struct oprofile_stats;
+
+/* reset all stats to zero */
+void oprofile_reset_stats(void);
+
+struct super_block;
+struct dentry;
+
+/* create the stats/ dir */
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
+
+#endif /* OPROFILE_STATS_H */
diff --git a/kern/src/oprofile/oprofilefs.c b/kern/src/oprofile/oprofilefs.c
new file mode 100644 (file)
index 0000000..849357c
--- /dev/null
@@ -0,0 +1,280 @@
+/**
+ * @file oprofilefs.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * A simple filesystem for configuration and
+ * access of oprofile.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/oprofile.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <asm/uaccess.h>
+
+#include "oprof.h"
+
+#define OPROFILEFS_MAGIC 0x6f70726f
+
+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
+
+static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
+{
+       struct inode *inode = new_inode(sb);
+
+       if (inode) {
+               inode->i_ino = get_next_ino();
+               inode->i_mode = mode;
+               inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       }
+       return inode;
+}
+
+
+static const struct super_operations s_ops = {
+       .statfs         = simple_statfs,
+       .drop_inode     = generic_delete_inode,
+};
+
+
+ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
+{
+       return simple_read_from_buffer(buf, count, offset, str, strlen(str));
+}
+
+
+#define TMPBUFSIZE 50
+
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
+{
+       char tmpbuf[TMPBUFSIZE];
+       size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+       if (maxlen > TMPBUFSIZE)
+               maxlen = TMPBUFSIZE;
+       return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+}
+
+
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
+int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
+{
+       char tmpbuf[TMPBUFSIZE];
+       unsigned long flags;
+
+       if (!count)
+               return 0;
+
+       if (count > TMPBUFSIZE - 1)
+               return -EINVAL;
+
+       memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+       if (copy_from_user(tmpbuf, buf, count))
+               return -EFAULT;
+
+       raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+       *val = simple_strtoul(tmpbuf, NULL, 0);
+       raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+       return count;
+}
+
+
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       unsigned long *val = file->private_data;
+       return oprofilefs_ulong_to_user(*val, buf, count, offset);
+}
+
+
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
+{
+       unsigned long value;
+       int retval;
+
+       if (*offset)
+               return -EINVAL;
+
+       retval = oprofilefs_ulong_from_user(&value, buf, count);
+       if (retval <= 0)
+               return retval;
+
+       retval = oprofile_set_ulong(file->private_data, value);
+       if (retval)
+               return retval;
+
+       return count;
+}
+
+
+static const struct file_operations ulong_fops = {
+       .read           = ulong_read_file,
+       .write          = ulong_write_file,
+       .open           = simple_open,
+       .llseek         = default_llseek,
+};
+
+
+static const struct file_operations ulong_ro_fops = {
+       .read           = ulong_read_file,
+       .open           = simple_open,
+       .llseek         = default_llseek,
+};
+
+
+static int __oprofilefs_create_file(struct super_block *sb,
+       struct dentry *root, char const *name, const struct file_operations *fops,
+       int perm, void *priv)
+{
+       struct dentry *dentry;
+       struct inode *inode;
+
+       dentry = d_alloc_name(root, name);
+       if (!dentry)
+               return -ENOMEM;
+       inode = oprofilefs_get_inode(sb, S_IFREG | perm);
+       if (!inode) {
+               dput(dentry);
+               return -ENOMEM;
+       }
+       inode->i_fop = fops;
+       d_add(dentry, inode);
+       dentry->d_inode->i_private = priv;
+       return 0;
+}
+
+
+int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+       char const *name, unsigned long *val)
+{
+       return __oprofilefs_create_file(sb, root, name,
+                                       &ulong_fops, 0644, val);
+}
+
+
+int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+       char const *name, unsigned long *val)
+{
+       return __oprofilefs_create_file(sb, root, name,
+                                       &ulong_ro_fops, 0444, val);
+}
+
+
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
+{
+       atomic_t *val = file->private_data;
+       return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
+}
+
+
+static const struct file_operations atomic_ro_fops = {
+       .read           = atomic_read_file,
+       .open           = simple_open,
+       .llseek         = default_llseek,
+};
+
+
+int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+       char const *name, atomic_t *val)
+{
+       return __oprofilefs_create_file(sb, root, name,
+                                       &atomic_ro_fops, 0444, val);
+}
+
+
+int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+       char const *name, const struct file_operations *fops)
+{
+       return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
+}
+
+
+int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+       char const *name, const struct file_operations *fops, int perm)
+{
+       return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
+}
+
+
+struct dentry *oprofilefs_mkdir(struct super_block *sb,
+       struct dentry *root, char const *name)
+{
+       struct dentry *dentry;
+       struct inode *inode;
+
+       dentry = d_alloc_name(root, name);
+       if (!dentry)
+               return NULL;
+       inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+       if (!inode) {
+               dput(dentry);
+               return NULL;
+       }
+       inode->i_op = &simple_dir_inode_operations;
+       inode->i_fop = &simple_dir_operations;
+       d_add(dentry, inode);
+       return dentry;
+}
+
+
+static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
+{
+       struct inode *root_inode;
+
+       sb->s_blocksize = PAGE_CACHE_SIZE;
+       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_magic = OPROFILEFS_MAGIC;
+       sb->s_op = &s_ops;
+       sb->s_time_gran = 1;
+
+       root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+       if (!root_inode)
+               return -ENOMEM;
+       root_inode->i_op = &simple_dir_inode_operations;
+       root_inode->i_fop = &simple_dir_operations;
+       sb->s_root = d_make_root(root_inode);
+       if (!sb->s_root)
+               return -ENOMEM;
+
+       oprofile_create_files(sb, sb->s_root);
+
+       // FIXME: verify kill_litter_super removes our dentries
+       return 0;
+}
+
+
+static struct dentry *oprofilefs_mount(struct file_system_type *fs_type,
+       int flags, const char *dev_name, void *data)
+{
+       return mount_single(fs_type, flags, data, oprofilefs_fill_super);
+}
+
+
+static struct file_system_type oprofilefs_type = {
+       .owner          = THIS_MODULE,
+       .name           = "oprofilefs",
+       .mount          = oprofilefs_mount,
+       .kill_sb        = kill_litter_super,
+};
+
+
+int __init oprofilefs_register(void)
+{
+       return register_filesystem(&oprofilefs_type);
+}
+
+
+void __exit oprofilefs_unregister(void)
+{
+       unregister_filesystem(&oprofilefs_type);
+}
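
For a backend that sets ops->create_files, the helpers above are all that is needed to publish tunables. A sketch with a hypothetical per-driver directory and knobs (the names are illustrative only):

    static unsigned long my_knob;   /* hypothetical writable tunable */
    static unsigned long my_hits;   /* hypothetical read-only counter */

    static int my_create_files(struct super_block *sb, struct dentry *root)
    {
            struct dentry *dir = oprofilefs_mkdir(sb, root, "mydrv");

            if (!dir)
                    return -ENOMEM;
            oprofilefs_create_ulong(sb, dir, "knob", &my_knob);     /* rw, 0644 */
            oprofilefs_create_ro_ulong(sb, dir, "hits", &my_hits);  /* ro, 0444 */
            return 0;
    }
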
diff --git a/kern/src/oprofile/timer_int.c b/kern/src/oprofile/timer_int.c
new file mode 100644 (file)
index 0000000..93404f7
--- /dev/null
@@ -0,0 +1,120 @@
+/**
+ * @file timer_int.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/oprofile.h>
+#include <linux/profile.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+
+#include "oprof.h"
+
+static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+static int ctr_running;
+
+static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
+{
+       oprofile_add_sample(get_irq_regs(), 0);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
+       return HRTIMER_RESTART;
+}
+
+static void __oprofile_hrtimer_start(void *unused)
+{
+       struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+
+       if (!ctr_running)
+               return;
+
+       hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtimer->function = oprofile_hrtimer_notify;
+
+       hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
+                     HRTIMER_MODE_REL_PINNED);
+}
+
+static int oprofile_hrtimer_start(void)
+{
+       get_online_cpus();
+       ctr_running = 1;
+       on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
+       put_online_cpus();
+       return 0;
+}
+
+static void __oprofile_hrtimer_stop(int cpu)
+{
+       struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+
+       if (!ctr_running)
+               return;
+
+       hrtimer_cancel(hrtimer);
+}
+
+static void oprofile_hrtimer_stop(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu)
+               __oprofile_hrtimer_stop(cpu);
+       ctr_running = 0;
+       put_online_cpus();
+}
+
+static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+                                        unsigned long action, void *hcpu)
+{
+       long cpu = (long) hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               smp_call_function_single(cpu, __oprofile_hrtimer_start,
+                                        NULL, 1);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               __oprofile_hrtimer_stop(cpu);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata oprofile_cpu_notifier = {
+       .notifier_call = oprofile_cpu_notify,
+};
+
+static int oprofile_hrtimer_setup(void)
+{
+       return register_hotcpu_notifier(&oprofile_cpu_notifier);
+}
+
+static void oprofile_hrtimer_shutdown(void)
+{
+       unregister_hotcpu_notifier(&oprofile_cpu_notifier);
+}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+       ops->create_files       = NULL;
+       ops->setup              = oprofile_hrtimer_setup;
+       ops->shutdown           = oprofile_hrtimer_shutdown;
+       ops->start              = oprofile_hrtimer_start;
+       ops->stop               = oprofile_hrtimer_stop;
+       ops->cpu_type           = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
+       return 0;
+}
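
Since the hrtimer re-arms itself with ns_to_ktime(TICK_NSEC), this fallback mode takes one sample per tick on every online cpu; for example, assuming HZ = 100:

    /* TICK_NSEC ~= 1,000,000,000 / HZ = 10,000,000 ns (10 ms)
     * -> oprofile_add_sample() runs ~100 times per second on each cpu */
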