/* akaros/kern/arch/x86/trap64.h */
   1/* Copyright (c) 2009-13 The Regents of the University of California
   2 * Barret Rhoden <brho@cs.berkeley.edu>
   3 * See LICENSE for details.
   4 *
   5 * x86 trap.h bit-specific functions.  This is included by trap.h, do not
   6 * include it directly.  Any function beginning with x86_ is internal to x86,
   7 * and not to be called by the main kernel.  Other functions are part of the
   8 * kernel-arch interface. */
   9
  10#pragma once
  11
  12#ifndef ROS_KERN_ARCH_TRAP_H
  13#error "Do not include arch/trap64.h directly."
  14#endif
  15
  16#include <arch/fsgsbase.h>
  17
  18/* Helper: if *addr isn't a canonical user address, poison it.  Use this when
  19 * you need a canonical address (like MSR_FS_BASE) */
  20static inline void enforce_user_canon(uintptr_t *addr)
  21{
  22        if (*addr >> 47 != 0)
  23                *addr = 0x5a5a5a5a;
  24}
  25
  26static inline bool in_kernel(struct hw_trapframe *hw_tf)
  27{
  28        return (hw_tf->tf_cs & ~3) == GD_KT;
  29}
  30
/* Advance the saved user RIP by 'bytes', e.g. to step over an instruction
 * the kernel handled on the user's behalf. */
static inline void x86_advance_ip(struct hw_trapframe *hw_tf, size_t bytes)
{
	hw_tf->tf_rip += bytes;
}
  35
  36static inline void x86_fake_rdtscp(struct hw_trapframe *hw_tf)
  37{
  38        uint64_t tsc_time = read_tsc();
  39        hw_tf->tf_rip += 3;
  40        hw_tf->tf_rax = tsc_time & 0xffffffff;
  41        hw_tf->tf_rdx = tsc_time >> 32;
  42        hw_tf->tf_rcx = core_id();
  43}
  44
/* STAR[63:48]: selector base sysret uses to build the user CS/SS ("| 0x3"
 * sets RPL 3; GD_UD - 8 presumably lines the GDT up so sysret's +8/+16
 * selector math lands on the user segments — see AMD APM 2:6.1.1).
 * STAR[47:32]: selector base syscall uses for the kernel CS/SS. */
#define AKAROS_MSR_STAR (((((uint64_t)GD_UD - 8) | 0x3) << 48) |             \
                         ((uint64_t)GD_KT << 32))
/* 64-bit syscall entry point, loaded into MSR_LSTAR below. */
#define AKAROS_MSR_LSTAR ((uintptr_t)&sysenter_handler)
/* Masking all flags.  when we syscall, we'll get rflags = 0 */
#define AKAROS_MSR_SFMASK (FL_AC | FL_NT | FL_IOPL_MASK | FL_DF | FL_IF | FL_TF)
  50
/* Program the fast-syscall MSRs and turn the syscall instruction on. */
static inline void x86_sysenter_init(void)
{
	/* check amd 2:6.1.1 for details.  they have some expectations about the
	 * GDT layout (STAR-derived selectors must match it). */
	write_msr(MSR_STAR, AKAROS_MSR_STAR);
	/* LSTAR: 64-bit mode syscall entry point. */
	write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
	/* SFMASK: rflags bits cleared on syscall entry. */
	write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);
	/* Finally, set EFER.SCE to enable syscall/sysret. */
	write_msr(IA32_EFER_MSR, read_msr(IA32_EFER_MSR) | IA32_EFER_SYSCALL);
}
  60
  61/* these are used for both sysenter and traps on 32 bit */
static inline void x86_set_sysenter_stacktop(uintptr_t stacktop)
{
	/* Stash the kernel stack top at %gs:0.  NOTE(review): presumably the
	 * kernel GS base points at per-core data that the syscall entry path
	 * loads its stack from — confirm against trapentry64.S. */
	asm volatile ("movq %0, %%gs:0" : : "r"(stacktop));
}
  66
/* First syscall argument: the user passed it in %rdi. */
static inline long x86_get_sysenter_arg0(struct hw_trapframe *hw_tf)
{
	return hw_tf->tf_rdi;
}
  71
/* Second syscall argument: the user passed it in %rsi. */
static inline long x86_get_sysenter_arg1(struct hw_trapframe *hw_tf)
{
	return hw_tf->tf_rsi;
}
  76
/* First argument of a trap-based syscall: same register (%rdi) as the
 * sysenter path. */
static inline long x86_get_systrap_arg0(struct hw_trapframe *hw_tf)
{
	return hw_tf->tf_rdi;
}
  81
/* Second argument of a trap-based syscall: same register (%rsi) as the
 * sysenter path. */
static inline long x86_get_systrap_arg1(struct hw_trapframe *hw_tf)
{
	return hw_tf->tf_rsi;
}
  86
/* RSP0 is the stack the CPU switches to on a ring 3 -> ring 0 trap. */
static inline uintptr_t x86_get_stacktop_tss(struct taskstate *tss)
{
	return tss->ts_rsp0;
}
  91
/* Set the stack the CPU will use for the next user -> kernel trap. */
static inline void x86_set_stacktop_tss(struct taskstate *tss, uintptr_t top)
{
	tss->ts_rsp0 = top;
}
  96
  97/* Keep tf_padding0 in sync with trapentry64.S */
/* A partial HW frame is marked by tf_padding0 == 1 (set by the assembly
 * entry code; see trapentry64.S, which must stay in sync). */
static inline bool x86_hwtf_is_partial(struct hw_trapframe *tf)
{
	return tf->tf_padding0 == 1;
}
 102
/* Same convention as HW frames: tf_padding0 == 1 marks a partial SW frame. */
static inline bool x86_swtf_is_partial(struct sw_trapframe *tf)
{
	return tf->tf_padding0 == 1;
}
 107
 108static inline bool x86_vmtf_is_partial(struct vm_trapframe *tf)
 109{
 110        return tf->tf_flags & VMCTX_FL_PARTIAL ? TRUE : FALSE;
 111}
 112
/* Mark the HW frame as full (all registers saved). */
static inline void x86_hwtf_clear_partial(struct hw_trapframe *tf)
{
	tf->tf_padding0 = 0;
}
 117
/* Mark the SW frame as full. */
static inline void x86_swtf_clear_partial(struct sw_trapframe *tf)
{
	tf->tf_padding0 = 0;
}
 122
/* Mark the VM frame as full by dropping its flag bit. */
static inline void x86_vmtf_clear_partial(struct vm_trapframe *tf)
{
	tf->tf_flags &= ~VMCTX_FL_PARTIAL;
}
 127
 128static inline bool arch_ctx_is_partial(struct user_context *ctx)
 129{
 130        switch (ctx->type) {
 131        case ROS_HW_CTX:
 132                return x86_hwtf_is_partial(&ctx->tf.hw_tf);
 133        case ROS_SW_CTX:
 134                return x86_swtf_is_partial(&ctx->tf.sw_tf);
 135        case ROS_VM_CTX:
 136                return x86_vmtf_is_partial(&ctx->tf.vm_tf);
 137        }
 138        return FALSE;
 139}
 140