akaros/kern/arch/x86/init.c
/* See COPYRIGHT for copyright information. */

#include <ros/common.h>
#include <smp.h>
#include <arch/x86.h>
#include <arch/pci.h>
#include <arch/console.h>
#include <arch/perfmon.h>
#include <arch/init.h>
#include <monitor.h>
#include <arch/usb.h>
#include <assert.h>
#include <ros/procinfo.h>
#include <cpu_feat.h>

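/* Default extended state (x87/SSE/etc.), built once in ancillary_state_init()
 * and later used to initialize or restore ancillary state on each core. */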
struct ancillary_state x86_default_fpu;
uint32_t kerndate;

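/* Map a capital letter to its control-character code, e.g. 'G' -> ctrl-G
 * (0x07): ASCII control codes are the corresponding letter minus '@' (0x40). */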
#define capchar2ctl(x) ((x) - '@')

/* irq handler for the console (kb, serial, etc) */
static void irq_console(struct hw_trapframe *hw_tf, void *data)
{
        uint8_t c;
        struct cons_dev *cdev = (struct cons_dev*)data;
        assert(cdev);
        if (cons_get_char(cdev, &c))
                return;
        /* Control code intercepts */
        switch (c) {
        case capchar2ctl('G'):
                /* traditional 'ctrl-g', will put you in the monitor gracefully
                 */
                send_kernel_message(core_id(), __run_mon, 0, 0, 0,
                                    KMSG_ROUTINE);
                return;
        case capchar2ctl('Q'):
                /* force you into the monitor.  you might deadlock. */
                printk("\nForcing entry to the monitor\n");
                monitor(hw_tf);
                return;
        case capchar2ctl('B'):
                /* backtrace / debugging for the core receiving the irq */
                printk("\nForced trapframe and backtrace for core %d\n",
                       core_id());
                if (!hw_tf) {
                        printk("(no hw_tf, we probably polled the console)\n");
                        return;
                }
                print_trapframe(hw_tf);
                backtrace_hwtf(hw_tf);
                return;
        }
        cons_add_char(c);
}

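/* Fallback console input path: polls for characters every 10 ms rather than
 * relying on an IRQ.  irq_console() is called with a NULL hw_tf in this case. */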
static void cons_poller(void *arg)
{
        while (1) {
                kthread_usleep(10000);
                irq_console(0, arg);
        }
}

static void cons_irq_init(void)
{
        struct cons_dev *i;
        /* Register interrupt handlers for all console devices */
        SLIST_FOREACH(i, &cdev_list, next) {
                register_irq(i->irq, irq_console, i, MKBUS(BusISA, 0, 0, 0));
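                /* Poll once now, presumably to catch any input that arrived
                 * before the handler was registered. */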
                irq_console(0, i);
        }
}

/* Init x86 processor extended state */
void ancillary_state_init(void)
{
        uint32_t eax, ebx, ecx, edx;
        /* proc supported user state components */
        uint64_t proc_supported_features;

        // If you don't at least have FXSAVE and FXRSTOR
        // (includes OSFXSR), you don't boot.
        if (!cpu_has_feat(CPU_FEAT_X86_FXSR))
                panic("No FXSAVE/FXRSTOR (FXSR) support! Refusing to boot.");

        if (cpu_has_feat(CPU_FEAT_X86_XSAVE)) {
                // Next determine the user state components supported
                // by the processor and set x86_default_xcr0 in
                // proc_global_info.
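                // CPUID leaf 0x0D, sub-leaf 0: EDX:EAX is the bitmap of
                // state components that can be enabled in XCR0.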
                cpuid(0x0d, 0x00, &eax, 0, 0, &edx);
                proc_supported_features = ((uint64_t)edx << 32) | eax;

                // Intersection of processor-supported and Akaros-supported
                // features is the Akaros-wide default at runtime.
                __proc_global_info.x86_default_xcr0 = X86_MAX_XCR0 &
                                                      proc_supported_features;

                /*
                 * Make sure CR4.OSXSAVE is set and set the local xcr0 to the
                 * default.  We will do both of these things again during
                 * per-cpu init, but we are about to use XSAVE to build our
                 * default extended state record, so we need them enabled.  You
                 * must set CR4_OSXSAVE before setting xcr0, or a #UD fault
                 * occurs.
                 */
                lcr4(rcr4() | CR4_OSXSAVE);
                lxcr0(__proc_global_info.x86_default_xcr0);

                /* Build a default set of extended state values that we can
                 * later use to initialize extended state on other cores, or
                 * restore on this core.  FNINIT won't actually do it - if you
                 * xsave after fninit, x87 will show up as active in
                 * xstate_bv[0].  Instead, we just need the xstate_bv bits
                 * zeroed (and memset the rest for sanity's sake).
                 */
                memset(&x86_default_fpu, 0x00, sizeof(struct ancillary_state));

                /* We must set the MXCSR field in the default state struct to
                 * its power-on value of 0x1f80. This masks all SIMD floating
                 * point exceptions and clears all SIMD floating-point exception
                 * flags, sets rounding control to round-nearest, disables
                 * flush-to-zero mode, and disables denormals-are-zero mode.
                 *
                 * We don't actually have to set the MXCSR itself here,
                 * because it will be set from the default state struct when
                 * we perform per-cpu init.
                 *
                 * Right now, we set the MXCSR through fp_head_64d. Since
                 * the mxcsr is at the same offset in all fp header formats
                 * implemented for Akaros, this will function correctly for
                 * all supported operating modes.
                 */
                x86_default_fpu.fp_head_64d.mxcsr = 0x1f80;
        } else {
                // Since no program should try to use XSAVE features
                // on this processor, we set x86_default_xcr0 to 0x0
                __proc_global_info.x86_default_xcr0 = 0x0;

                /*
                 * Build a default set of extended state values that we can
                 * later use to initialize extended state on other cores, or
                 * restore on this core.  We need to use FNINIT to reset the FPU
                 * before saving, in case boot agents used the FPU or it is
                 * dirty for some reason. An old comment that used to be here
                 * said "had this happen on c89, which had a full FP stack after
                 * booting." Note that FNINIT does not clear the data registers,
                 * but it tags them all as empty (0b11).
                 */

                // Zero the default extended state memory region before saving.
                // It may be possible for memset to clobber SSE registers.
                memset(&x86_default_fpu, 0x00, sizeof(struct ancillary_state));

                /*
                 * FNINIT clears FIP and FDP and, even though it is technically
                 * a control instruction, it clears FOP while initializing the
                 * FPU.
                 *
                 * This marks the STX/MMX registers as empty in the FPU tag
                 * word, but does not actually clear the values in the
                 * registers, so we manually clear them in the xsave area after
                 * saving.
                 */
                asm volatile ("fninit");

                // Save the x87 FPU state
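                // Note: fxsave64 requires a 16-byte-aligned memory operand;
                // struct ancillary_state is assumed to provide that alignment.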
                asm volatile("fxsave64 %0" : "=m"(x86_default_fpu));

                /*
                 * Clear junk that might have been saved from the STX/MMX
                 * registers.
                 *
                 * FXSAVE may have also saved junk from the XMM registers,
                 * depending on how the hardware was implemented and the setting
                 * of CR4.OSFXSR. So we clear that too.
                 *
                 * MMX: 128 bytes, XMM: 256 bytes
                 */
                memset(&(x86_default_fpu.st0_mm0), 0x00, 128 + 256);

                /*
                 * Finally, because Only the Paranoid Survive, we set the MXCSR
                 * for our default state. It should have been saved by FXSAVE,
                 * but who knows if the default value is still there at this
                 * point in the boot process.
                 */
                x86_default_fpu.fp_head_64d.mxcsr = 0x1f80;
        }
}

void arch_init(void)
{
        ancillary_state_init();
        pci_init();
        vmm_init();
        perfmon_global_init();
        // this returns when all other cores are done and ready to receive IPIs
#ifdef CONFIG_SINGLE_CORE
        smp_percpu_init();
#else
        smp_boot();
#endif
        proc_init();

        cons_irq_init();
        intel_lpc_init();
#ifdef CONFIG_ENABLE_LEGACY_USB
        printk("Legacy USB support enabled, expect SMM interference!\n");
#else
        usb_disable_legacy();
#endif
        check_timing_stability();
}