1 /* Copyright (c) 2009-13 The Regents of the University of California
2 * Barret Rhoden <brho@cs.berkeley.edu>
3 * See LICENSE for details.
5 * x86 trap.h bit-specific functions. This is included by trap.h, do not
6 * include it directly. Any function beginning with x86_ is internal to x86,
7 * and not to be called by the main kernel. Other functions are part of the
8 * kernel-arch interface. */
12 #ifndef ROS_KERN_ARCH_TRAP_H
13 #error "Do not include arch/trap64.h directly."
16 #include <arch/fsgsbase.h>
/* Helper: if *addr isn't a canonical user address, poison it. Use this when
 * you need a canonical address (like MSR_FS_BASE) */
/* NOTE(review): the function body is not visible in this chunk.  Per the
 * comment above, it presumably checks the canonical-address bits (63:47 on
 * x86-64) and overwrites *addr with a poison value when they are bad --
 * confirm against the full file before relying on this. */
static inline void enforce_user_canon(uintptr_t *addr)
26 static inline bool in_kernel(struct hw_trapframe *hw_tf)
28 return (hw_tf->tf_cs & ~3) == GD_KT;
31 static inline void x86_advance_ip(struct hw_trapframe *hw_tf, size_t bytes)
33 hw_tf->tf_rip += bytes;
36 static inline void x86_fake_rdtscp(struct hw_trapframe *hw_tf)
38 uint64_t tsc_time = read_tsc();
40 hw_tf->tf_rax = tsc_time & 0xffffffff;
41 hw_tf->tf_rdx = tsc_time >> 32;
42 hw_tf->tf_rcx = core_id();
/* MSR_STAR: bits 63:48 hold the SYSRET segment base (user segs, RPL 3, and
 * the -8 accounts for SYSRET's fixed CS/SS layout); bits 47:32 hold the
 * SYSCALL segment base (kernel segs). */
#define AKAROS_MSR_STAR (((((uint64_t)GD_UD - 8) | 0x3) << 48) | \
			 ((uint64_t)GD_KT << 32))
/* MSR_LSTAR: 64-bit-mode SYSCALL entry point. */
#define AKAROS_MSR_LSTAR ((uintptr_t)&sysenter_handler)
/* MSR_SFMASK: rflags bits cleared on SYSCALL entry.  Masking all of these
 * means the kernel starts with rflags = 0 (notably, interrupts disabled). */
#define AKAROS_MSR_SFMASK (FL_AC | FL_NT | FL_IOPL_MASK | FL_DF | FL_IF | FL_TF)
51 static inline void x86_sysenter_init(void)
53 /* check amd 2:6.1.1 for details. they have some expectations about the
55 write_msr(MSR_STAR, AKAROS_MSR_STAR);
56 write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
57 write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);
58 write_msr(IA32_EFER_MSR, read_msr(IA32_EFER_MSR) | IA32_EFER_SYSCALL);
61 /* these are used for both sysenter and traps on 32 bit */
62 static inline void x86_set_sysenter_stacktop(uintptr_t stacktop)
64 asm volatile ("movq %0, %%gs:0" : : "r"(stacktop));
/* Syscall-argument accessors.  NOTE(review): the bodies are not visible in
 * this chunk -- each presumably returns the register the respective entry
 * path (sysenter/syscall vs. trap/int) uses for that argument slot; confirm
 * against the full file and the entry asm. */
static inline long x86_get_sysenter_arg0(struct hw_trapframe *hw_tf)
static inline long x86_get_sysenter_arg1(struct hw_trapframe *hw_tf)
static inline long x86_get_systrap_arg0(struct hw_trapframe *hw_tf)
static inline long x86_get_systrap_arg1(struct hw_trapframe *hw_tf)
/* Get/set the kernel stacktop recorded in the TSS.  NOTE(review): bodies are
 * not visible in this chunk -- on x86-64 these presumably read/write the
 * TSS's rsp0 field (stack used on privilege-level change); confirm in the
 * full file. */
static inline uintptr_t x86_get_stacktop_tss(struct taskstate *tss)
static inline void x86_set_stacktop_tss(struct taskstate *tss, uintptr_t top)
97 /* Keep tf_padding0 in sync with trapentry64.S */
98 static inline bool x86_hwtf_is_partial(struct hw_trapframe *tf)
100 return tf->tf_padding0 == 1;
103 static inline bool x86_swtf_is_partial(struct sw_trapframe *tf)
105 return tf->tf_padding0 == 1;
108 static inline bool x86_vmtf_is_partial(struct vm_trapframe *tf)
110 return tf->tf_flags & VMCTX_FL_PARTIAL ? TRUE : FALSE;
/* Mark HW/SW trapframes as fully saved.  NOTE(review): bodies are not
 * visible in this chunk -- presumably they clear tf_padding0 (the partial
 * flag, kept in sync with trapentry64.S); confirm in the full file. */
static inline void x86_hwtf_clear_partial(struct hw_trapframe *tf)
static inline void x86_swtf_clear_partial(struct sw_trapframe *tf)
123 static inline void x86_vmtf_clear_partial(struct vm_trapframe *tf)
125 tf->tf_flags &= ~VMCTX_FL_PARTIAL;
/* Returns whether a user context was only partially saved, dispatching on
 * the context type to the matching x86_*_is_partial() helper.
 * NOTE(review): the switch statement and its case labels are not visible in
 * this chunk; only the per-type return statements are shown below. */
static inline bool arch_ctx_is_partial(struct user_context *ctx)
		return x86_hwtf_is_partial(&ctx->tf.hw_tf);
		return x86_swtf_is_partial(&ctx->tf.sw_tf);
		return x86_vmtf_is_partial(&ctx->tf.vm_tf);