Fixes preempt/indir tracing in lock_test
author Barret Rhoden <brho@cs.berkeley.edu>
Tue, 3 Jun 2014 20:33:35 +0000 (13:33 -0700)
committer Barret Rhoden <brho@cs.berkeley.edu>
Tue, 3 Jun 2014 20:41:17 +0000 (13:41 -0700)
Much nicer than overriding the event handlers, and we don't need to expose the
vc and indir handlers anymore.
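
The patch registers standalone tracing handlers for each event type instead of
wrapping and re-exporting uthread's internal handlers.  Below is a minimal
sketch of that pattern, not the exact lock_test code: it assumes parlib's
register_ev_handler() chains handlers for an event type rather than replacing
them (so uthread.c's own handle_vc_preempt() still runs after the tracer), and
the include paths and trace-buffer size are placeholders.

/* Sketch only: assumed include paths and buffer size. */
#include <stdint.h>
#include <event.h>
#include <arch/atomic.h>

#define MAX_NR_EVENT_TRACES 1000

static uint64_t preempts[MAX_NR_EVENT_TRACES];
static atomic_t preempt_idx;

/* Pure tracer: records a timestamp and returns.  It never calls into the
 * 2LS's preemption handler, which can stay static inside uthread.c. */
static void trace_preempt(struct event_msg *ev_msg, unsigned int ev_type,
                          void *data)
{
	unsigned long slot = atomic_fetch_and_add(&preempt_idx, 1);

	if (slot < MAX_NR_EVENT_TRACES)
		preempts[slot] = read_tsc();
}

/* Run once during setup, e.g. from os_prep_work(), after pthread_lib_init(). */
static void setup_preempt_tracing(void)
{
	register_ev_handler(EV_VCORE_PREEMPT, trace_preempt, 0);
}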

tests/lock_test.c
user/parlib/include/uthread.h
user/parlib/uthread.c

diff --git a/tests/lock_test.c b/tests/lock_test.c
index cbcf333..0db4b22 100644
--- a/tests/lock_test.c
+++ b/tests/lock_test.c
  *                                     still spins.  they'll have to make sure their pred runs)
  *                     -adj workers doesn't matter either...
  *                             - the 2LS and preemption handling might be doing this
- *                             automatically, when handle_preempt() does a
+ *                             automatically, when handle_vc_preempt() does a
  *                             thread_paused() on its current_uthread.
  *                             - adj_workers isn't critical if we're using some locks
  *                             that check notif_pending.  eventually someone hears
@@ -605,24 +605,22 @@ atomic_t indir_idx;
 atomic_t preempt_cnt;
 atomic_t indir_cnt;
 
-static void handle_preempt(struct event_msg *ev_msg, unsigned int ev_type,
-                           void *data)
+static void trace_preempt(struct event_msg *ev_msg, unsigned int ev_type,
+                          void *data)
 {
        unsigned long my_slot = atomic_fetch_and_add(&preempt_idx, 1);
        if (my_slot < MAX_NR_EVENT_TRACES)
                preempts[my_slot] = read_tsc();
        atomic_inc(&preempt_cnt);
-       handle_vc_preempt(ev_msg, ev_type, data);
 }
 
-static void handle_indir(struct event_msg *ev_msg, unsigned int ev_type,
-                         void *data)
+static void trace_indir(struct event_msg *ev_msg, unsigned int ev_type,
+                        void *data)
 {
        unsigned long my_slot = atomic_fetch_and_add(&indir_idx, 1);
        if (my_slot < MAX_NR_EVENT_TRACES)
                indirs[my_slot] = read_tsc();
        atomic_inc(&indir_cnt);
-       handle_vc_indir(ev_msg, ev_type, data);
 }
 
 /* Helper, prints out the preempt trace */
@@ -666,9 +664,8 @@ static void os_prep_work(int nr_threads)
        pthread_can_vcore_request(FALSE);       /* 2LS won't manage vcores */
        pthread_need_tls(FALSE);
        pthread_lib_init();                                     /* gives us one vcore */
-       /* TODO: register tracing handlers (old style was replacing) */
-//     register_ev_handler(EV_VCORE_PREEMPT, handle_preempt, 0);
-//     register_ev_handler(EV_CHECK_MSGS, handle_indir, 0);
+       register_ev_handler(EV_VCORE_PREEMPT, trace_preempt, 0);
+       register_ev_handler(EV_CHECK_MSGS, trace_indir, 0);
        if (pargs.fake_vc_ctx) {
                /* need to disable events when faking vc ctx.  since we're looping and
                 * not handling events, we could run OOM */
diff --git a/user/parlib/include/uthread.h b/user/parlib/include/uthread.h
index d52596a..50faa90 100644
--- a/user/parlib/include/uthread.h
+++ b/user/parlib/include/uthread.h
@@ -90,12 +90,6 @@ void run_current_uthread(void);
 void run_uthread(struct uthread *uthread);
 static inline struct uthread **get_cur_uth_addr(uint32_t vcoreid);
 
-/* Event handlers - exported globally so programs can wrap them */
-void handle_vc_preempt(struct event_msg *ev_msg, unsigned int ev_type,
-                       void *data);
-void handle_vc_indir(struct event_msg *ev_msg, unsigned int ev_type,
-                     void *data);
-
 /* Asking for trouble with this API, when we just want stacktop (or whatever
  * the SP will be). */
 static inline void init_uthread_ctx(struct uthread *uth, void (*entry)(void),
diff --git a/user/parlib/uthread.c b/user/parlib/uthread.c
index a588765..31284cb 100644
--- a/user/parlib/uthread.c
+++ b/user/parlib/uthread.c
@@ -24,6 +24,11 @@ static int __uthread_reinit_tls(struct uthread *uthread);
 static void __uthread_free_tls(struct uthread *uthread);
 static void __run_current_uthread_raw(void);
 
+static void handle_vc_preempt(struct event_msg *ev_msg, unsigned int ev_type,
+                              void *data);
+static void handle_vc_indir(struct event_msg *ev_msg, unsigned int ev_type,
+                            void *data);
+
 /* Block the calling uthread on sysc until it makes progress or is done */
 static void __ros_mcp_syscall_blockon(struct syscall *sysc);
 
@@ -837,8 +842,8 @@ out_we_returned:
 
 /* This handles a preemption message.  When this is done, either we recovered,
  * or recovery *for our message* isn't needed. */
-void handle_vc_preempt(struct event_msg *ev_msg, unsigned int ev_type,
-                       void *data)
+static void handle_vc_preempt(struct event_msg *ev_msg, unsigned int ev_type,
+                              void *data)
 {
        uint32_t vcoreid = vcore_id();
        struct preempt_data *vcpd = vcpd_of(vcoreid);
@@ -937,7 +942,8 @@ out_stealing:
  * their indirs, or the vcore restarted enough so that checking them is
  * unnecessary.  If that happens and they got preempted quickly, then another
  * preempt/check_indirs was sent out. */
-void handle_vc_indir(struct event_msg *ev_msg, unsigned int ev_type, void *data)
+static void handle_vc_indir(struct event_msg *ev_msg, unsigned int ev_type,
+                            void *data)
 {
        uint32_t vcoreid = vcore_id();
        uint32_t rem_vcoreid = ev_msg->ev_arg2;