VMM: Manually save/restore certain registers [1/2]
authorBarret Rhoden <brho@cs.berkeley.edu>
Tue, 8 Nov 2016 22:33:28 +0000 (17:33 -0500)
committerBarret Rhoden <brho@cs.berkeley.edu>
Wed, 7 Dec 2016 22:46:48 +0000 (14:46 -0800)
Autoload is expensive.  We can do better by manually saving and restoring
registers.  We do it when we finalize the contexts, which will speed up any
kernel-handled VMEXIT.  In the future, if we lazily unload guest_pcores,
we'll also get this benefit.

Additionally, we can look at the MSR values to see if we can avoid the
write_msr(), which can help a lot, and we can also use the special helper
for accessing kern_gsbase.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/arch/x86/trap64.h
kern/arch/x86/vmm/vmm.c
kern/arch/x86/vmm/vmm.h

index 07bcf08..19f1cc5 100644 (file)
@@ -34,15 +34,19 @@ static inline void x86_fake_rdtscp(struct hw_trapframe *hw_tf)
        hw_tf->tf_rcx = core_id();
 }
 
+#define AKAROS_MSR_STAR (((((uint64_t)GD_UD - 8) | 0x3) << 48) |             \
+                            ((uint64_t)GD_KT << 32))
+#define AKAROS_MSR_LSTAR ((uintptr_t)&sysenter_handler)
+/* Masking all flags.  when we syscall, we'll get rflags = 0 */
+#define AKAROS_MSR_SFMASK 0xffffffff
+
 static inline void x86_sysenter_init(void)
 {
        /* check amd 2:6.1.1 for details.  they have some expectations about the GDT
         * layout. */
-       write_msr(MSR_STAR, ((((uint64_t)GD_UD - 8) | 0x3) << 48) |
-                           ((uint64_t)GD_KT << 32));
-       write_msr(MSR_LSTAR, (uintptr_t)&sysenter_handler);
-       /* Masking all flags.  when we syscall, we'll get rflags = 0 */
-       write_msr(MSR_SFMASK, 0xffffffff);
+       write_msr(MSR_STAR, AKAROS_MSR_STAR);
+       write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
+       write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);
        write_msr(IA32_EFER_MSR, read_msr(IA32_EFER_MSR) | IA32_EFER_SYSCALL);
 }
 
index f8a8294..392a3aa 100644 (file)
@@ -182,6 +182,16 @@ struct guest_pcore *load_guest_pcore(struct proc *p, int guest_pcoreid)
        vmx_load_guest_pcore(gpc);
        /* Load guest's xcr0 */
        lxcr0(gpc->xcr0);
+
+       /* Manual MSR save/restore */
+       write_kern_gsbase(gpc->msr_kern_gs_base);
+       if (gpc->msr_star != AKAROS_MSR_STAR)
+               write_msr(MSR_STAR, gpc->msr_star);
+       if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
+               write_msr(MSR_LSTAR, gpc->msr_lstar);
+       if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
+               write_msr(MSR_SFMASK, gpc->msr_sfmask);
+
        return gpc;
 }
 
@@ -202,6 +212,20 @@ void unload_guest_pcore(struct proc *p, int guest_pcoreid)
        gpc->xcr0 = rxcr0();
        lxcr0(__proc_global_info.x86_default_xcr0);
 
+       /* We manage these MSRs manually. */
+       gpc->msr_kern_gs_base = read_kern_gsbase();
+       gpc->msr_star = read_msr(MSR_STAR);
+       gpc->msr_lstar = read_msr(MSR_LSTAR);
+       gpc->msr_sfmask = read_msr(MSR_SFMASK);
+
+       write_kern_gsbase((uint64_t)pcpui);
+       if (gpc->msr_star != AKAROS_MSR_STAR)
+               write_msr(MSR_STAR, AKAROS_MSR_STAR);
+       if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
+               write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
+       if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
+               write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);
+
        /* As soon as we unlock, this gpc can be started on another core */
        spin_unlock(&p->vmm.lock);
        pcpui->guest_pcoreid = -1;
index 3203faf..179400a 100644 (file)
@@ -27,6 +27,10 @@ struct guest_pcore {
        } msr_autoload;
        struct vmcs *vmcs;
        uint64_t xcr0;
+       uint64_t msr_kern_gs_base;
+       uint64_t msr_star;
+       uint64_t msr_lstar;
+       uint64_t msr_sfmask;
 };
 
 struct vmm {