/* Determines the overhead of tsc timing. Note the start/stop calls are
 * inlined, so we're trying to determine the lowest amount of overhead
 * attainable by using the TSC (or whatever timing source).
 * For more detailed TSC measurements, use test_rdtsc() in k/a/i/rdtsc_test.c */
uint64_t min_overhead = UINT64_MAX;
uint64_t max_overhead = 0;
/* Reset this, in case we run it again. The use of start/stop to determine
 * the overhead relies on timing_overhead being 0. */
system_timing.timing_overhead = 0;
/* timing might use cpuid, in which case we warm it up to avoid some extra
 * overhead on the real samples below (presumably the first cpuid is the
 * expensive one — TODO confirm). */
time = start_timing();
diff = stop_timing(time);
time = start_timing();
diff = stop_timing(time);
time = start_timing();
diff = stop_timing(time);
/* IRQs off while sampling so an interrupt can't inflate a measurement. */
disable_irqsave(&irq_state);
for (int i = 0; i < 10000; i++) {
time = start_timing();
diff = stop_timing(time);
min_overhead = MIN(min_overhead, diff);
max_overhead = MAX(max_overhead, diff);
/* NOTE(review): the loop's closing brace appears elided in this view;
 * enable_irqsave() presumably runs after the loop — confirm in the repo. */
enable_irqsave(&irq_state);
/* The minimum observed diff is our best estimate of the unavoidable
 * start/stop overhead; record it for the rest of the system. */
system_timing.timing_overhead = min_overhead;
printk("TSC overhead (Min: %llu, Max: %llu)\n", min_overhead, max_overhead);
44 void udelay_sched(uint64_t usec)
46 struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
47 struct alarm_waiter a_waiter;
48 init_awaiter(&a_waiter, 0);
49 set_awaiter_rel(&a_waiter, usec);
50 set_alarm(tchain, &a_waiter);
51 sleep_on_awaiter(&a_waiter);
/* Convenience wrapper called when a core's timer interrupt goes off. Not to be
 * confused with global timers (like the PIC). Do not put your code here. If
 * you want something to happen in the future, set an alarm. */
void timer_interrupt(struct hw_trapframe *hw_tf, void *data)
/* NOTE(review): the function's opening brace appears elided in this view. */
/* Feed the profiler the interrupted PC (and, for the backtrace variant, the
 * frame pointer).
 * NOTE(review): these two calls are likely mutually exclusive, gated on
 * whether hw_tf is a kernel- or user-mode frame; the guarding if/else
 * appears elided in this view — confirm against the repo. */
oprofile_add_backtrace(get_hwtf_pc(hw_tf), get_hwtf_fp(hw_tf));
oprofile_add_userpc(get_hwtf_pc(hw_tf));
/* Run this core's timer chain, firing any alarms that have expired. */
__trigger_tchain(&per_cpu_info[core_id()].tchain, hw_tf);
67 /* We can overflow/wraparound when we multiply up, but we have to divide last,
68 * or else we lose precision. If we're too big and will overflow, we'll
69 * sacrifice precision for correctness, and degrade to the next lower level
70 * (losing 3 digits worth). The recursive case shouldn't overflow, since it
71 * called something that scaled down the tsc_time by more than 1000. */
72 uint64_t tsc2sec(uint64_t tsc_time)
74 return tsc_time / system_timing.tsc_freq;
77 uint64_t tsc2msec(uint64_t tsc_time)
79 if (mult_will_overflow_u64(tsc_time, 1000))
80 return tsc2sec(tsc_time) * 1000;
82 return (tsc_time * 1000) / system_timing.tsc_freq;
85 uint64_t tsc2usec(uint64_t tsc_time)
87 if (mult_will_overflow_u64(tsc_time, 1000000))
88 return tsc2msec(tsc_time) * 1000;
90 return (tsc_time * 1000000) / system_timing.tsc_freq;
93 uint64_t tsc2nsec(uint64_t tsc_time)
95 if (mult_will_overflow_u64(tsc_time, 1000000000))
96 return tsc2usec(tsc_time) * 1000;
98 return (tsc_time * 1000000000) / system_timing.tsc_freq;
101 uint64_t sec2tsc(uint64_t sec)
103 if (mult_will_overflow_u64(sec, system_timing.tsc_freq)) {
104 /* in this case, we simply can't express the number of ticks */
105 warn("Wraparound in sec2tsc(), rounding up");
106 return (uint64_t)(-1);
108 return sec * system_timing.tsc_freq;
112 uint64_t msec2tsc(uint64_t msec)
114 if (mult_will_overflow_u64(msec, system_timing.tsc_freq))
115 return sec2tsc(msec / 1000);
117 return (msec * system_timing.tsc_freq) / 1000;
120 uint64_t usec2tsc(uint64_t usec)
122 if (mult_will_overflow_u64(usec, system_timing.tsc_freq))
123 return msec2tsc(usec / 1000);
125 return (usec * system_timing.tsc_freq) / 1000000;
128 uint64_t nsec2tsc(uint64_t nsec)
130 if (mult_will_overflow_u64(nsec, system_timing.tsc_freq))
131 return usec2tsc(nsec / 1000);
133 return (nsec * system_timing.tsc_freq) / 1000000000;