/* akaros/kern/src/time.c */
   1#include <arch/arch.h>
   2#include <time.h>
   3#include <stdio.h>
   4#include <schedule.h>
   5#include <multiboot.h>
   6#include <pmap.h>
   7#include <smp.h>
   8#include <ros/procinfo.h>
   9
  10/* Determines the overhead of tsc timing.  Note the start/stop calls are
  11 * inlined, so we're trying to determine the lowest amount of overhead
  12 * attainable by using the TSC (or whatever timing source).
  13 *
  14 * For more detailed TSC measurements, use test_rdtsc() in k/a/i/rdtsc_test.c */
  15static void train_timing(void)
  16{
  17        uint64_t min_overhead = UINT64_MAX;
  18        uint64_t max_overhead = 0;
  19        uint64_t time, diff;
  20        int8_t irq_state = 0;
  21
  22        /* Reset this, in case we run it again.  The use of start/stop to
  23         * determine the overhead relies on timing_overhead being 0. */
  24        __proc_global_info.tsc_overhead = 0;
  25        /* timing might use cpuid, in which case we warm it up to avoid some
  26         * extra variance */
  27        time = start_timing();
  28        diff = stop_timing(time);
  29        time = start_timing();
  30        diff = stop_timing(time);
  31        time = start_timing();
  32        diff = stop_timing(time);
  33        disable_irqsave(&irq_state);
  34        for (int i = 0; i < 10000; i++) {
  35                time = start_timing();
  36                diff = stop_timing(time);
  37                min_overhead = MIN(min_overhead, diff);
  38                max_overhead = MAX(max_overhead, diff);
  39        }
  40        enable_irqsave(&irq_state);
  41        __proc_global_info.tsc_overhead = min_overhead;
  42        printk("TSC overhead (Min: %llu, Max: %llu)\n", min_overhead,
  43               max_overhead); }
  44
/* Convenience wrapper called when a core's timer interrupt goes off.  Not to be
 * confused with global timers (like the PIC).  Do not put your code here.  If
 * you want something to happen in the future, set an alarm.
 *
 * hw_tf is the hardware trapframe from the IRQ; data is the (unused) generic
 * IRQ-handler argument. */
void timer_interrupt(struct hw_trapframe *hw_tf, void *data)
{
	/* Fire every alarm waiting on this core's timer chain. */
	__trigger_tchain(&per_cpu_info[core_id()].tchain, hw_tf);
}
  52
  53/*
  54 * We use scaled integer arithmetic for converting between TSC clock cycles
  55 * and nanoseconds. In each case we use a fixed shift value of 32 which
  56 * gives a very high degree of accuracy.
  57 *
  58 * The actual scaling calculations rely on being able use the 128 bit
  59 * product of two unsigned 64 bit numbers as an intermediate result
  60 * in the calculation. Fortunately, on x86_64 at least, gcc's 128 bit
  61 * support is sufficiently good that it generates optimal code for this
  62 * calculation without the need to write any assembler.
  63 */
  64static inline uint64_t mult_shift_64(uint64_t a, uint64_t b, uint8_t shift)
  65{
  66        return ((unsigned __int128)a * b) >> shift;
  67}
  68
/* Fixed-point scale factors for the cycle<->nsec conversions below.  Computed
 * once from the TSC frequency by cycles_to_nsec_init()/nsec_to_cycles_init()
 * (called from time_init()) and read-only afterwards. */
static uint64_t cycles_to_nsec_mult;
static uint64_t nsec_to_cycles_mult;

/* Both conversions use a 32-bit fractional shift (see mult_shift_64()). */
#define CYCLES_TO_NSEC_SHIFT    32
#define NSEC_TO_CYCLES_SHIFT    32
  74
  75static void cycles_to_nsec_init(uint64_t tsc_freq_hz)
  76{
  77        cycles_to_nsec_mult = (NSEC_PER_SEC << CYCLES_TO_NSEC_SHIFT) / tsc_freq_hz;
  78}
  79
  80static void nsec_to_cycles_init(uint64_t tsc_freq_hz)
  81{
  82        uint64_t divisor = NSEC_PER_SEC;
  83
  84        /*
  85         * In the unlikely event that the TSC frequency is greater
  86         * than (1 << 32) we have to lose a little precision to
  87         * avoid overflow in the calculation of the multiplier.
  88         */
  89        while (tsc_freq_hz >= ((uint64_t)1 << NSEC_TO_CYCLES_SHIFT)) {
  90                tsc_freq_hz >>= 1;
  91                divisor >>= 1;
  92        }
  93        nsec_to_cycles_mult = (tsc_freq_hz << NSEC_TO_CYCLES_SHIFT) / divisor;
  94}
  95
  96uint64_t tsc2nsec(uint64_t tsc_time)
  97{
  98        return mult_shift_64(tsc_time, cycles_to_nsec_mult, CYCLES_TO_NSEC_SHIFT);
  99}
 100
 101uint64_t nsec2tsc(uint64_t nsec)
 102{
 103        return mult_shift_64(nsec, nsec_to_cycles_mult, NSEC_TO_CYCLES_SHIFT);
 104}
 105
/*
 * Return nanoseconds since the UNIX epoch, 1st January, 1970.
 *
 * Computed as the wall-clock time sampled in time_init() (walltime_ns_last)
 * plus the nanoseconds' worth of TSC cycles elapsed since the TSC was read
 * at that same moment (tsc_cycles_last).
 */
uint64_t epoch_nsec(void)
{
	/* Cycles elapsed since the wall clock was last snapshotted. */
	uint64_t cycles = read_tsc() - __proc_global_info.tsc_cycles_last;

	return __proc_global_info.walltime_ns_last + tsc2nsec(cycles);
}
 115
/* One-time boot initialization of the timing subsystem: trains the TSC
 * measurement overhead, snapshots the persistent clock and the TSC as a pair
 * (the reference point epoch_nsec() works from), and computes the
 * cycle<->nsec conversion multipliers.
 *
 * NOTE(review): assumes __proc_global_info.tsc_freq was set earlier in boot —
 * confirm ordering against the arch init path. */
void time_init(void)
{
	train_timing();

	/* These two reads must stay adjacent: epoch_nsec() interprets
	 * walltime_ns_last as the wall time at tsc_cycles_last. */
	__proc_global_info.walltime_ns_last = read_persistent_clock();
	__proc_global_info.tsc_cycles_last  = read_tsc();

	cycles_to_nsec_init(__proc_global_info.tsc_freq);
	nsec_to_cycles_init(__proc_global_info.tsc_freq);
}
 126