akaros/kern/arch/x86/smp_entry64.S
#include <arch/mmu.h>
#include <ros/memlayout.h>
#include <arch/trap.h>
#include <arch/x86.h>

#define RELOC(x) ((x) - KERNBASE)
#define CPUID_PSE_SUPPORT       0x00000008

.globl smp_entry
smp_entry: .code16
        cli
        cld
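        # Note: "sym - smp_entry + 0x1000" rebases a link-time address to the
        # trampoline copy of this code at physical 0x1000, which is where the
        # APs actually execute.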
        # announce our presence
        lock incw       smp_semaphore - smp_entry + 0x1000
spin_start:             # grab lock in real mode
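        # xchgw atomically swaps 1 into the lock word; a nonzero old value
        # means another core already holds the lock, so keep spinning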
        movw    $1, %ax
        xchgw   %ax, smp_boot_lock - smp_entry + 0x1000
        test    %ax, %ax
        jne     spin_start
        # Set up rudimentary segmentation
        xorw    %ax, %ax                        # Segment number zero
        movw    %ax, %ds                        # -> Data Segment
        movw    %ax, %es                        # -> Extra Segment
        movw    %ax, %ss                        # -> Stack Segment
        # Would like to patch all of these 0x1000's at trampoline relocation
        # time.  There are three of them, so we could patch the trampoline
        # code when we load, once we're sure the entry code will not change
        # anymore.
        lgdt    gdtdesc - smp_entry + 0x1000
        # Turn on protected mode
        movl    %cr0, %eax
        orl     $CR0_PE, %eax
        movl    %eax, %cr0
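        # Far jump to reload %cs with the kernel code selector.  The target is
        # the trampoline copy of protcseg, since that's where we're running.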
        ljmp    $GD_KT, $(protcseg - smp_entry + 0x1000)
.code32
protcseg:
        # Set up the protected-mode data segment registers
        movw    $GD_KD, %ax             # Kernel segment selector
        movw    %ax, %ds                # -> DS: Data Segment
        movw    %ax, %es                # -> ES: Extra Segment
        movw    %ax, %ss                # -> SS: Stack Segment
        movw    %ax, %fs                # -> FS
        movw    %ax, %gs                # -> GS
        # Turn on Paging.  We're using the symbol from entry64, which we'll
        # have no problem linking against (compared to boot_cr3).  This
        # assumes we use the boot stuff at least through smp_boot.
        movl    $boot_pml4, %eax
        movl    %eax, %cr3
        # Turn on paging options in cr4.  Note we assume PSE support; if we
        # didn't have it, our jumbo page mappings would fail.  We also want
        # global pages (for performance).  PAE is required for long mode
        # paging.
        movl    %cr4, %eax
        orl     $(CR4_PSE | CR4_PGE | CR4_PAE), %eax
        movl    %eax, %cr4
        # Turn on the IA32E enable bit.
        # rdmsr/wrmsr use ecx for the MSR address and edx:eax for the value;
        # we only touch the low 32 bits here.
        movl    $IA32_EFER_MSR, %ecx
        rdmsr
        orl     $IA32_EFER_IA32E_EN, %eax
        wrmsr
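        # (the IA32E/LME enable set above doesn't take effect until paging is
        # enabled via CR0.PG below, using the PAE page tables set up above)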
        # Set up cr0.  PE and PG are critical for now.  The others are similar
        # to what we want in general (we clear AM since it's useless in 64 bit
        # mode).
        movl    %cr0, %eax
        orl     $(CR0_PE | CR0_PG | CR0_WP | CR0_NE | CR0_MP), %eax
        andl    $(~(CR0_AM | CR0_TS | CR0_EM | CR0_CD | CR0_NW)), %eax
        movl    %eax, %cr0
        # load the 64bit GDT and jump to long mode (symbol from entry64)
        lgdt    gdt64desc
        # Want to jump to the label long_mode, but we need a target reachable
        # by 32 bit code: its copy on our trampoline page.
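        # (selector 0x08 is the descriptor right after the null entry in
        # gdt64, i.e. the 64 bit code segment)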
        ljmp    $0x08, $(long_mode - smp_entry + 0x1000)
.code64
long_mode:
        # Note: we are still running code on the trampoline
        # zero the data segments.  Not sure if this is legit or not.
        xor     %rax, %rax
        mov     %ax, %ds
        mov     %ax, %es
        mov     %ax, %ss
        mov     %ax, %fs
        mov     %ax, %gs
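        # (loading a null selector into LDTR means no LDT is in use)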
        lldt    %ax
        incl    x86_num_cores_booted            # an int
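        # smp_stack_top holds the address of a boot stack.  Cores come up one
        # at a time under smp_boot_lock, so using it is safe until we switch
        # to the stack smp_main returns below.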
        movq    (smp_stack_top), %rsp
        movq    $0, %rbp                # so backtrace works
        # We're on the trampoline, but want to be in the real location of the
        # smp code (somewhere above KERN_LOAD_ADDR).  This allows us to easily
        # unmap the boot up memory, which the trampoline is part of.
        movabs  $(non_trampoline), %rax
        call    *%rax
non_trampoline:
        call    smp_main
        # use our new stack, value returned from smp_main
        movq    %rax, %rsp
        # note the next two lines are using the direct mapping from smp_boot().
        # Remember, the stuff at 0x1000 is a *copy* of the code and data at
        # KERN_LOAD_ADDR.
        movw    $0, smp_boot_lock - smp_entry + 0x1000  # release lock
        lock decw       smp_semaphore - smp_entry + 0x1000  # show we are done
        sti                     # so we can get the IPI
        hlt                     # wait for the IPI to run smp_pcu_init()
        call    smp_final_core_init
        call    smp_idle        # idle loop, will have interrupts turned on
        # smp_idle should never return
spin:
        jmp     spin

        # Below here is just data, stored with the code text
        .p2align        2                       # force 4 byte alignment
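        # 32 bit GDT used only for the real -> protected mode hop; gdt64 (from
        # entry64) replaces it before the jump to long mode above.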
gdt:
        SEG_NULL                                # null seg
        SEG(STA_X|STA_R, 0, 0xffffffff)         # code seg
        SEG(STA_W, 0, 0xffffffff)               # data seg
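        # lgdt operand: a 16 bit limit (size - 1) followed by a 32 bit base
        # address; the base must point at the trampoline copy of gdt.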
gdtdesc:
        .word   gdtdesc - gdt - 1               # sizeof(gdt) - 1
        .long   gdt - smp_entry + 0x1000        # address gdt
        .p2align        2                       # force 4 byte alignment
.globl smp_boot_lock
smp_boot_lock:                  # this lock word will only be used from
        .word   0               # its spot in the trampoline (0x1000)
.globl smp_semaphore
smp_semaphore:                  # poor man's polling semaphore
        .word   0
.globl smp_entry_end
smp_entry_end: