Lindent pass
author: Barret Rhoden <brho@cs.berkeley.edu>
Wed, 29 Jan 2014 20:14:58 +0000 (12:14 -0800)
committer: Barret Rhoden <brho@cs.berkeley.edu>
Wed, 29 Jan 2014 20:14:58 +0000 (12:14 -0800)
Downside: it messes up git's history for specific lines.

Here's a useful command (from the last time we did this):

git diff --name-status master..HEAD | grep '^A' | cut -f 2 | grep
'^kern' | grep -v Kbuild | xargs ./scripts/lindent

107 files changed:
kern/arch/x86/bitops.h
kern/arch/x86/emulate.c
kern/arch/x86/emulate.h
kern/arch/x86/msr-index.h
kern/arch/x86/paging_tmpl.h
kern/arch/x86/vm.h
kern/arch/x86/vmdebug.h
kern/arch/x86/vmx.c
kern/arch/x86/vmx.h
kern/arch/x86/vmx_mmu.c
kern/drivers/dev/alarm.c
kern/drivers/dev/cons.c
kern/drivers/dev/ether.c
kern/drivers/dev/mnt.c
kern/drivers/dev/pipe.c
kern/drivers/dev/proc.c
kern/drivers/dev/root.c
kern/drivers/dev/srv.c
kern/drivers/dev/vm.c
kern/drivers/net/e1000/e1000.c
kern/drivers/net/e1000/e1000.h
kern/drivers/net/e1000/e1000_82540.c
kern/drivers/net/e1000/e1000_82541.c
kern/drivers/net/e1000/e1000_82541.h
kern/drivers/net/e1000/e1000_82542.c
kern/drivers/net/e1000/e1000_82543.c
kern/drivers/net/e1000/e1000_82543.h
kern/drivers/net/e1000/e1000_api.c
kern/drivers/net/e1000/e1000_api.h
kern/drivers/net/e1000/e1000_defines.h
kern/drivers/net/e1000/e1000_hw.h
kern/drivers/net/e1000/e1000_mac.c
kern/drivers/net/e1000/e1000_mac.h
kern/drivers/net/e1000/e1000_main.c
kern/drivers/net/e1000/e1000_manage.c
kern/drivers/net/e1000/e1000_manage.h
kern/drivers/net/e1000/e1000_nvm.c
kern/drivers/net/e1000/e1000_nvm.h
kern/drivers/net/e1000/e1000_phy.c
kern/drivers/net/e1000/e1000_phy.h
kern/drivers/net/e1000/e1000_regs.h
kern/drivers/net/ether8139.c
kern/drivers/net/mii.c
kern/drivers/net/r8169.c
kern/drivers/net/r8169.h
kern/include/ip.h
kern/include/mii.h
kern/include/ns.h
kern/src/hexdump.c
kern/src/net/arp.c
kern/src/net/bootp.c
kern/src/net/compress.c
kern/src/net/devip.c
kern/src/net/dhcp.c
kern/src/net/dial.c
kern/src/net/eipconv.c
kern/src/net/esp.c
kern/src/net/ethermedium.c
kern/src/net/gre.c
kern/src/net/icmp.c
kern/src/net/icmp6.c
kern/src/net/igmp.c
kern/src/net/ihbootp.c
kern/src/net/ip.c
kern/src/net/ipaux.c
kern/src/net/ipifc.c
kern/src/net/ipmux.c
kern/src/net/ipprotoinit.c
kern/src/net/iproute.c
kern/src/net/iprouter.c
kern/src/net/ipv6.c
kern/src/net/kernel.h
kern/src/net/loopbackmedium.c
kern/src/net/netaux.c
kern/src/net/netdevmedium.c
kern/src/net/netif.c
kern/src/net/netlog.c
kern/src/net/nullmedium.c
kern/src/net/pktmedium.c
kern/src/net/plan9.c
kern/src/net/ppp.c
kern/src/net/ppp.h
kern/src/net/pppmedium.c
kern/src/net/ptclbsum.c
kern/src/net/rudp.c
kern/src/net/tcp.c
kern/src/net/udp.c
kern/src/ns/allocb.c
kern/src/ns/cache.c
kern/src/ns/chan.c
kern/src/ns/cleanname.c
kern/src/ns/convD2M.c
kern/src/ns/convM2D.c
kern/src/ns/convM2S.c
kern/src/ns/convM2kdirent.c
kern/src/ns/convS2M.c
kern/src/ns/dev.c
kern/src/ns/devtab.c
kern/src/ns/getfields.c
kern/src/ns/parse.c
kern/src/ns/pgrp.c
kern/src/ns/qio.c
kern/src/ns/random.c
kern/src/ns/sysfile.c
kern/src/ns/tokenize.c
kern/src/ns/util.c
kern/src/strstr.c

index 6dfd019..33a00bf 100644 (file)
@@ -62,13 +62,12 @@ static __always_inline void
 set_bit(unsigned int nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
-               asm volatile(LOCK_PREFIX "orb %1,%0"
-                       : CONST_MASK_ADDR(nr, addr)
-                       : "iq" ((u8)CONST_MASK(nr))
-                       : "memory");
+               asm volatile (LOCK_PREFIX "orb %1,%0":CONST_MASK_ADDR(nr, addr)
+                                         :"iq"((u8) CONST_MASK(nr))
+                                         :"memory");
        } else {
-               asm volatile(LOCK_PREFIX "bts %1,%0"
-                       : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+               asm volatile (LOCK_PREFIX
+                                         "bts %1,%0":BITOP_ADDR(addr):"Ir"(nr):"memory");
        }
 }
 
@@ -83,7 +82,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
  */
 static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
-       asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile ("bts %1,%0":ADDR:"Ir"(nr):"memory");
 }
 
 /**
@@ -96,17 +95,14 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static __always_inline void
-clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(int nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
-               asm volatile(LOCK_PREFIX "andb %1,%0"
-                       : CONST_MASK_ADDR(nr, addr)
-                       : "iq" ((u8)~CONST_MASK(nr)));
+               asm volatile (LOCK_PREFIX "andb %1,%0":CONST_MASK_ADDR(nr, addr)
+                                         :"iq"((u8) ~ CONST_MASK(nr)));
        } else {
-               asm volatile(LOCK_PREFIX "btr %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+               asm volatile (LOCK_PREFIX "btr %1,%0":BITOP_ADDR(addr)
+                                         :"Ir"(nr));
        }
 }
 
@@ -126,7 +122,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 
 static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
-       asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+       asm volatile ("btr %1,%0":ADDR:"Ir"(nr));
 }
 
 /*
@@ -161,7 +157,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
  */
 static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
-       asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+       asm volatile ("btc %1,%0":ADDR:"Ir"(nr));
 }
 
 /**
@@ -176,13 +172,11 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
-               asm volatile(LOCK_PREFIX "xorb %1,%0"
-                       : CONST_MASK_ADDR(nr, addr)
-                       : "iq" ((u8)CONST_MASK(nr)));
+               asm volatile (LOCK_PREFIX "xorb %1,%0":CONST_MASK_ADDR(nr, addr)
+                                         :"iq"((u8) CONST_MASK(nr)));
        } else {
-               asm volatile(LOCK_PREFIX "btc %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+               asm volatile (LOCK_PREFIX "btc %1,%0":BITOP_ADDR(addr)
+                                         :"Ir"(nr));
        }
 }
 
@@ -198,8 +192,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
-                    "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+       asm volatile (LOCK_PREFIX "bts %2,%1\n\t"
+                                 "sbb %0,%0":"=r"(oldbit), ADDR:"Ir"(nr):"memory");
 
        return oldbit;
 }
@@ -230,10 +224,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm("bts %2,%1\n\t"
-           "sbb %0,%0"
-           : "=r" (oldbit), ADDR
-           : "Ir" (nr));
+asm("bts %2,%1\n\t" "sbb %0,%0": "=r"(oldbit), ADDR:"Ir"(nr));
        return oldbit;
 }
 
@@ -249,9 +240,8 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+       asm volatile (LOCK_PREFIX "btr %2,%1\n\t"
+                                 "sbb %0,%0":"=r"(oldbit), ADDR:"Ir"(nr):"memory");
 
        return oldbit;
 }
@@ -276,10 +266,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile("btr %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR
-                    : "Ir" (nr));
+       asm volatile ("btr %2,%1\n\t" "sbb %0,%0":"=r" (oldbit), ADDR:"Ir"(nr));
        return oldbit;
 }
 
@@ -288,10 +275,8 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile("btc %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR
-                    : "Ir" (nr) : "memory");
+       asm volatile ("btc %2,%1\n\t"
+                                 "sbb %0,%0":"=r" (oldbit), ADDR:"Ir"(nr):"memory");
 
        return oldbit;
 }
@@ -308,32 +293,29 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+       asm volatile (LOCK_PREFIX "btc %2,%1\n\t"
+                                 "sbb %0,%0":"=r"(oldbit), ADDR:"Ir"(nr):"memory");
 
        return oldbit;
 }
 
-static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr,
+                                                                                        const volatile unsigned long *addr)
 {
-       return ((1UL << (nr % BITS_PER_LONG)) &
-               (addr[nr / BITS_PER_LONG])) != 0;
+       return ((1UL << (nr % BITS_PER_LONG)) & (addr[nr / BITS_PER_LONG])) != 0;
 }
 
 static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
 {
        int oldbit;
 
-       asm volatile("bt %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit)
-                    : "m" (*(unsigned long *)addr), "Ir" (nr));
+       asm volatile ("bt %2,%1\n\t" "sbb %0,%0":"=r" (oldbit)
+                                 :"m"(*(unsigned long *)addr), "Ir"(nr));
 
        return oldbit;
 }
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+#if 0  /* Fool kernel-doc since it doesn't do macros yet */
 /**
  * test_bit - Determine whether a bit is set
  * @nr: bit number to test
@@ -355,9 +337,8 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-       asm("rep; bsf %1,%0"
-               : "=r" (word)
-               : "rm" (word));
+asm("rep; bsf %1,%0":"=r"(word)
+:              "rm"(word));
        return word;
 }
 
@@ -369,9 +350,8 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-       asm("rep; bsf %1,%0"
-               : "=r" (word)
-               : "r" (~word));
+asm("rep; bsf %1,%0":"=r"(word)
+:              "r"(~word));
        return word;
 }
 
@@ -383,9 +363,8 @@ static inline unsigned long ffz(unsigned long word)
  */
 static inline unsigned long __fls(unsigned long word)
 {
-       asm("bsr %1,%0"
-           : "=r" (word)
-           : "rm" (word));
+asm("bsr %1,%0":"=r"(word)
+:              "rm"(word));
        return word;
 }
 
@@ -417,18 +396,12 @@ static inline int ffs(int x)
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
-       asm("bsfl %1,%0"
-           : "=r" (r)
-           : "rm" (x), "0" (-1));
+asm("bsfl %1,%0":"=r"(r)
+:              "rm"(x), "0"(-1));
 #elif defined(CONFIG_X86_CMOV)
-       asm("bsfl %1,%0\n\t"
-           "cmovzl %2,%0"
-           : "=&r" (r) : "rm" (x), "r" (-1));
+asm("bsfl %1,%0\n\t" "cmovzl %2,%0": "=&r"(r):"rm"(x), "r"(-1));
 #else
-       asm("bsfl %1,%0\n\t"
-           "jnz 1f\n\t"
-           "movl $-1,%0\n"
-           "1:" : "=r" (r) : "rm" (x));
+asm("bsfl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:": "=r"(r):"rm"(x));
 #endif
        return r + 1;
 }
@@ -458,18 +431,12 @@ static inline int fls(int x)
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
-       asm("bsrl %1,%0"
-           : "=r" (r)
-           : "rm" (x), "0" (-1));
+asm("bsrl %1,%0":"=r"(r)
+:              "rm"(x), "0"(-1));
 #elif defined(CONFIG_X86_CMOV)
-       asm("bsrl %1,%0\n\t"
-           "cmovzl %2,%0"
-           : "=&r" (r) : "rm" (x), "rm" (-1));
+asm("bsrl %1,%0\n\t" "cmovzl %2,%0": "=&r"(r):"rm"(x), "rm"(-1));
 #else
-       asm("bsrl %1,%0\n\t"
-           "jnz 1f\n\t"
-           "movl $-1,%0\n"
-           "1:" : "=r" (r) : "rm" (x));
+asm("bsrl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:": "=r"(r):"rm"(x));
 #endif
        return r + 1;
 }
@@ -494,9 +461,8 @@ static __always_inline int fls64(__u64 x)
         * dest reg is undefined if x==0, but their CPU architect says its
         * value is written to set it to the same as before.
         */
-       asm("bsrq %1,%q0"
-           : "+r" (bitpos)
-           : "rm" (x));
+asm("bsrq %1,%q0":"+r"(bitpos)
+:              "rm"(x));
        return bitpos + 1;
 }
 #else
index e741947..2377a12 100644 (file)
@@ -134,7 +134,7 @@ static uint8_t opcode_table[256] = {
        /* 0xC0 - 0xC7 */
        ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0,
        0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov,
-           DstMem | SrcImm | ModRM | Mov,
+       DstMem | SrcImm | ModRM | Mov,
        /* 0xC8 - 0xCF */
        0, 0, 0, 0, 0, 0, 0, 0,
        /* 0xD0 - 0xD7 */
@@ -189,13 +189,13 @@ static uint8_t twobyte_table[256] = {
        0, 0, 0, DstMem | SrcReg | ModRM, 0, 0, 0, 0,
        /* 0xB0 - 0xB7 */
        ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0,
-           DstMem | SrcReg | ModRM,
+       DstMem | SrcReg | ModRM,
        0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
-           DstReg | SrcMem16 | ModRM | Mov,
+       DstReg | SrcMem16 | ModRM | Mov,
        /* 0xB8 - 0xBF */
        0, 0, DstMem | SrcImmByte | ModRM, DstMem | SrcReg | ModRM,
        0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
-           DstReg | SrcMem16 | ModRM | Mov,
+       DstReg | SrcMem16 | ModRM | Mov,
        /* 0xC0 - 0xCF */
        0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 0,
        /* 0xD0 - 0xDF */
@@ -230,11 +230,11 @@ struct operand {
  */
 
 #if defined(__x86_64__)
-#define _LO32 "k"              /* force 32-bit operand */
-#define _STK  "%%rsp"          /* stack pointer */
+#define _LO32 "k"      /* force 32-bit operand */
+#define _STK  "%%rsp"  /* stack pointer */
 #elif defined(__i386__)
-#define _LO32 ""               /* force 32-bit operand */
-#define _STK  "%%esp"          /* stack pointer */
+#define _LO32 ""       /* force 32-bit operand */
+#define _STK  "%%esp"  /* stack pointer */
 #endif
 
 /*
@@ -400,7 +400,7 @@ struct operand {
 #elif defined(__i386__)
 #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)
 #define __emulate_1op_8byte(_op, _dst, _eflags)
-#endif                         /* __i386__ */
+#endif /* __i386__ */
 
 /* Fetch next part of the instruction being emulated. */
 #define insn_fetch(_type, _size, _eip)                                  \
@@ -429,8 +429,7 @@ struct operand {
                           (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1)); \
        } while (0)
 
-void *decode_register(uint8_t modrm_reg, unsigned long *regs,
-                     int highbyte_regs)
+void *decode_register(uint8_t modrm_reg, unsigned long *regs, int highbyte_regs)
 {
        void *p;
 
@@ -441,9 +440,10 @@ void *decode_register(uint8_t modrm_reg, unsigned long *regs,
 }
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
-                          struct x86_emulate_ops *ops,
-                          void *ptr,
-                          uint16_t *size, unsigned long *address, int op_bytes)
+                                                  struct x86_emulate_ops *ops,
+                                                  void *ptr,
+                                                  uint16_t * size, unsigned long *address,
+                                                  int op_bytes)
 {
        int rc;
 
@@ -479,63 +479,63 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        memcpy(_regs, ctxt->vcpu->regs, sizeof _regs);
 
        switch (mode) {
-       case X86EMUL_MODE_REAL:
-       case X86EMUL_MODE_PROT16:
-               op_bytes = ad_bytes = 2;
-               break;
-       case X86EMUL_MODE_PROT32:
-               op_bytes = ad_bytes = 4;
-               break;
+               case X86EMUL_MODE_REAL:
+               case X86EMUL_MODE_PROT16:
+                       op_bytes = ad_bytes = 2;
+                       break;
+               case X86EMUL_MODE_PROT32:
+                       op_bytes = ad_bytes = 4;
+                       break;
 #ifdef __x86_64__
-       case X86EMUL_MODE_PROT64:
-               op_bytes = 4;
-               ad_bytes = 8;
-               break;
+               case X86EMUL_MODE_PROT64:
+                       op_bytes = 4;
+                       ad_bytes = 8;
+                       break;
 #endif
-       default:
-               return -1;
+               default:
+                       return -1;
        }
 
        /* Legacy prefixes. */
        for (i = 0; i < 8; i++) {
                switch (b = insn_fetch(uint8_t, 1, _eip)) {
-               case 0x66:      /* operand-size override */
-                       op_bytes ^= 6;  /* switch between 2/4 bytes */
-                       break;
-               case 0x67:      /* address-size override */
-                       if (mode == X86EMUL_MODE_PROT64)
-                               ad_bytes ^= 12; /* switch between 4/8 bytes */
-                       else
-                               ad_bytes ^= 6;  /* switch between 2/4 bytes */
-                       break;
-               case 0x2e:      /* CS override */
-                       override_base = &ctxt->cs_base;
-                       break;
-               case 0x3e:      /* DS override */
-                       override_base = &ctxt->ds_base;
-                       break;
-               case 0x26:      /* ES override */
-                       override_base = &ctxt->es_base;
-                       break;
-               case 0x64:      /* FS override */
-                       override_base = &ctxt->fs_base;
-                       break;
-               case 0x65:      /* GS override */
-                       override_base = &ctxt->gs_base;
-                       break;
-               case 0x36:      /* SS override */
-                       override_base = &ctxt->ss_base;
-                       break;
-               case 0xf0:      /* LOCK */
-                       lock_prefix = 1;
-                       break;
-               case 0xf3:      /* REP/REPE/REPZ */
-                       rep_prefix = 1;
-                       break;
-               case 0xf2:      /* REPNE/REPNZ */
-                       break;
-               default:
-                       goto done_prefixes;
+                       case 0x66:      /* operand-size override */
+                               op_bytes ^= 6;  /* switch between 2/4 bytes */
+                               break;
+                       case 0x67:      /* address-size override */
+                               if (mode == X86EMUL_MODE_PROT64)
+                                       ad_bytes ^= 12; /* switch between 4/8 bytes */
+                               else
+                                       ad_bytes ^= 6;  /* switch between 2/4 bytes */
+                               break;
+                       case 0x2e:      /* CS override */
+                               override_base = &ctxt->cs_base;
+                               break;
+                       case 0x3e:      /* DS override */
+                               override_base = &ctxt->ds_base;
+                               break;
+                       case 0x26:      /* ES override */
+                               override_base = &ctxt->es_base;
+                               break;
+                       case 0x64:      /* FS override */
+                               override_base = &ctxt->fs_base;
+                               break;
+                       case 0x65:      /* GS override */
+                               override_base = &ctxt->gs_base;
+                               break;
+                       case 0x36:      /* SS override */
+                               override_base = &ctxt->ss_base;
+                               break;
+                       case 0xf0:      /* LOCK */
+                               lock_prefix = 1;
+                               break;
+                       case 0xf3:      /* REP/REPE/REPZ */
+                               rep_prefix = 1;
+                               break;
+                       case 0xf2:      /* REPNE/REPNZ */
+                               break;
+                       default:
+                               goto done_prefixes;
                }
        }
 
@@ -547,8 +547,8 @@ done_prefixes:
                if (b & 8)
                        op_bytes = 8;   /* REX.W */
                modrm_reg = (b & 4) << 1;       /* REX.R */
-               index_reg = (b & 2) << 2; /* REX.X */
-               modrm_rm = base_reg = (b & 1) << 3; /* REG.B */
+               index_reg = (b & 2) << 2;       /* REX.X */
+               modrm_rm = base_reg = (b & 1) << 3;     /* REG.B */
                b = insn_fetch(uint8_t, 1, _eip);
        }
 
@@ -590,105 +590,104 @@ done_prefixes:
 
                        /* 16-bit ModR/M decode. */
                        switch (modrm_mod) {
-                       case 0:
-                               if (modrm_rm == 6)
+                               case 0:
+                                       if (modrm_rm == 6)
+                                               modrm_ea += insn_fetch(uint16_t, 2, _eip);
+                                       break;
+                               case 1:
+                                       modrm_ea += insn_fetch(int8_t, 1, _eip);
+                                       break;
+                               case 2:
                                        modrm_ea += insn_fetch(uint16_t, 2, _eip);
-                               break;
-                       case 1:
-                               modrm_ea += insn_fetch(int8_t, 1, _eip);
-                               break;
-                       case 2:
-                               modrm_ea += insn_fetch(uint16_t, 2, _eip);
-                               break;
+                                       break;
                        }
                        switch (modrm_rm) {
-                       case 0:
-                               modrm_ea += bx + si;
-                               break;
-                       case 1:
-                               modrm_ea += bx + di;
-                               break;
-                       case 2:
-                               modrm_ea += bp + si;
-                               break;
-                       case 3:
-                               modrm_ea += bp + di;
-                               break;
-                       case 4:
-                               modrm_ea += si;
-                               break;
-                       case 5:
-                               modrm_ea += di;
-                               break;
-                       case 6:
-                               if (modrm_mod != 0)
-                                       modrm_ea += bp;
-                               break;
-                       case 7:
-                               modrm_ea += bx;
-                               break;
+                               case 0:
+                                       modrm_ea += bx + si;
+                                       break;
+                               case 1:
+                                       modrm_ea += bx + di;
+                                       break;
+                               case 2:
+                                       modrm_ea += bp + si;
+                                       break;
+                               case 3:
+                                       modrm_ea += bp + di;
+                                       break;
+                               case 4:
+                                       modrm_ea += si;
+                                       break;
+                               case 5:
+                                       modrm_ea += di;
+                                       break;
+                               case 6:
+                                       if (modrm_mod != 0)
+                                               modrm_ea += bp;
+                                       break;
+                               case 7:
+                                       modrm_ea += bx;
+                                       break;
                        }
                        if (modrm_rm == 2 || modrm_rm == 3 ||
-                           (modrm_rm == 6 && modrm_mod != 0))
+                               (modrm_rm == 6 && modrm_mod != 0))
                                if (!override_base)
                                        override_base = &ctxt->ss_base;
-                       modrm_ea = (uint16_t)modrm_ea;
+                       modrm_ea = (uint16_t) modrm_ea;
                } else {
                        /* 32/64-bit ModR/M decode. */
                        switch (modrm_rm) {
-                       case 4:
-                       case 12:
-                               sib = insn_fetch(uint8_t, 1, _eip);
-                               index_reg |= (sib >> 3) & 7;
-                               base_reg |= sib & 7;
-                               scale = sib >> 6;
+                               case 4:
+                               case 12:
+                                       sib = insn_fetch(uint8_t, 1, _eip);
+                                       index_reg |= (sib >> 3) & 7;
+                                       base_reg |= sib & 7;
+                                       scale = sib >> 6;
 
-                               switch (base_reg) {
+                                       switch (base_reg) {
+                                               case 5:
+                                                       if (modrm_mod != 0)
+                                                               modrm_ea += _regs[base_reg];
+                                                       else
+                                                               modrm_ea += insn_fetch(int32_t, 4, _eip);
+                                                       break;
+                                               default:
+                                                       modrm_ea += _regs[base_reg];
+                                       }
+                                       switch (index_reg) {
+                                               case 4:
+                                                       break;
+                                               default:
+                                                       modrm_ea += _regs[index_reg] << scale;
+
+                                       }
+                                       break;
                                case 5:
                                        if (modrm_mod != 0)
-                                               modrm_ea += _regs[base_reg];
-                                       else
-                                               modrm_ea += insn_fetch(int32_t, 4, _eip);
-                                       break;
-                               default:
-                                       modrm_ea += _regs[base_reg];
-                               }
-                               switch (index_reg) {
-                               case 4:
+                                               modrm_ea += _regs[modrm_rm];
+                                       else if (mode == X86EMUL_MODE_PROT64)
+                                               rip_relative = 1;
                                        break;
                                default:
-                                       modrm_ea += _regs[index_reg] << scale;
-
-                               }
-                               break;
-                       case 5:
-                               if (modrm_mod != 0)
                                        modrm_ea += _regs[modrm_rm];
-                               else if (mode == X86EMUL_MODE_PROT64)
-                                       rip_relative = 1;
-                               break;
-                       default:
-                               modrm_ea += _regs[modrm_rm];
-                               break;
+                                       break;
                        }
                        switch (modrm_mod) {
-                       case 0:
-                               if (modrm_rm == 5)
+                               case 0:
+                                       if (modrm_rm == 5)
+                                               modrm_ea += insn_fetch(int32_t, 4, _eip);
+                                       break;
+                               case 1:
+                                       modrm_ea += insn_fetch(int8_t, 1, _eip);
+                                       break;
+                               case 2:
                                        modrm_ea += insn_fetch(int32_t, 4, _eip);
-                               break;
-                       case 1:
-                               modrm_ea += insn_fetch(int8_t, 1, _eip);
-                               break;
-                       case 2:
-                               modrm_ea += insn_fetch(int32_t, 4, _eip);
-                               break;
+                                       break;
                        }
                }
                if (!override_base)
                        override_base = &ctxt->ds_base;
                if (mode == X86EMUL_MODE_PROT64 &&
-                   override_base != &ctxt->fs_base &&
-                   override_base != &ctxt->gs_base)
+                       override_base != &ctxt->fs_base && override_base != &ctxt->gs_base)
                        override_base = 0;
 
                if (override_base)
@@ -697,63 +696,61 @@ done_prefixes:
                if (rip_relative) {
                        modrm_ea += _eip;
                        switch (d & SrcMask) {
-                       case SrcImmByte:
-                               modrm_ea += 1;
-                               break;
-                       case SrcImm:
-                               if (d & ByteOp)
+                               case SrcImmByte:
                                        modrm_ea += 1;
-                               else
-                                       if (op_bytes == 8)
+                                       break;
+                               case SrcImm:
+                                       if (d & ByteOp)
+                                               modrm_ea += 1;
+                                       else if (op_bytes == 8)
                                                modrm_ea += 4;
                                        else
                                                modrm_ea += op_bytes;
                        }
                }
                if (ad_bytes != 8)
-                       modrm_ea = (uint32_t)modrm_ea;
+                       modrm_ea = (uint32_t) modrm_ea;
                cr2 = modrm_ea;
-       modrm_done:
+modrm_done:
                ;
        }
 
        /* Decode and fetch the destination operand: register or memory. */
        switch (d & DstMask) {
-       case ImplicitOps:
-               /* Special instructions do their own operand decoding. */
-               goto special_insn;
-       case DstReg:
-               dst.type = OP_REG;
-               if ((d & ByteOp)
-                   && !(twobyte && (b == 0xb6 || b == 0xb7))) {
-                       dst.ptr = decode_register(modrm_reg, _regs,
-                                                 (rex_prefix == 0));
-                       dst.val = *(uint8_t *) dst.ptr;
-                       dst.bytes = 1;
-               } else {
-                       dst.ptr = decode_register(modrm_reg, _regs, 0);
-                       switch ((dst.bytes = op_bytes)) {
-                       case 2:
-                               dst.val = *(uint16_t *)dst.ptr;
-                               break;
-                       case 4:
-                               dst.val = *(uint32_t *)dst.ptr;
-                               break;
-                       case 8:
-                               dst.val = *(uint64_t *)dst.ptr;
-                               break;
+               case ImplicitOps:
+                       /* Special instructions do their own operand decoding. */
+                       goto special_insn;
+               case DstReg:
+                       dst.type = OP_REG;
+                       if ((d & ByteOp)
+                               && !(twobyte && (b == 0xb6 || b == 0xb7))) {
+                               dst.ptr = decode_register(modrm_reg, _regs, (rex_prefix == 0));
+                               dst.val = *(uint8_t *) dst.ptr;
+                               dst.bytes = 1;
+                       } else {
+                               dst.ptr = decode_register(modrm_reg, _regs, 0);
+                               switch ((dst.bytes = op_bytes)) {
+                                       case 2:
+                                               dst.val = *(uint16_t *) dst.ptr;
+                                               break;
+                                       case 4:
+                                               dst.val = *(uint32_t *) dst.ptr;
+                                               break;
+                                       case 8:
+                                               dst.val = *(uint64_t *) dst.ptr;
+                                               break;
+                               }
                        }
-               }
-               break;
-       case DstMem:
-               dst.type = OP_MEM;
-               dst.ptr = (unsigned long *)cr2;
-               dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-               if (!(d & Mov) && /* optimisation - avoid slow emulated read */
-                   ((rc = ops->read_emulated((unsigned long)dst.ptr,
-                                             &dst.val, dst.bytes, ctxt)) != 0))
-                       goto done;
-               break;
+                       break;
+               case DstMem:
+                       dst.type = OP_MEM;
+                       dst.ptr = (unsigned long *)cr2;
+                       dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+                       if (!(d & Mov) &&       /* optimisation - avoid slow emulated read */
+                               ((rc = ops->read_emulated((unsigned long)dst.ptr,
+                                                                                 &dst.val, dst.bytes, ctxt)) != 0))
+                               goto done;
+                       break;
        }
        dst.orig_val = dst.val;
 
@@ -762,317 +759,313 @@ done_prefixes:
         * or immediate.
         */
        switch (d & SrcMask) {
-       case SrcNone:
-               break;
-       case SrcReg:
-               src.type = OP_REG;
-               if (d & ByteOp) {
-                       src.ptr = decode_register(modrm_reg, _regs,
-                                                 (rex_prefix == 0));
-                       src.val = src.orig_val = *(uint8_t *) src.ptr;
-                       src.bytes = 1;
-               } else {
-                       src.ptr = decode_register(modrm_reg, _regs, 0);
-                       switch ((src.bytes = op_bytes)) {
-                       case 2:
-                               src.val = src.orig_val = *(uint16_t *) src.ptr;
-                               break;
-                       case 4:
-                               src.val = src.orig_val = *(uint32_t *) src.ptr;
-                               break;
-                       case 8:
-                               src.val = src.orig_val = *(uint64_t *) src.ptr;
-                               break;
+               case SrcNone:
+                       break;
+               case SrcReg:
+                       src.type = OP_REG;
+                       if (d & ByteOp) {
+                               src.ptr = decode_register(modrm_reg, _regs, (rex_prefix == 0));
+                               src.val = src.orig_val = *(uint8_t *) src.ptr;
+                               src.bytes = 1;
+                       } else {
+                               src.ptr = decode_register(modrm_reg, _regs, 0);
+                               switch ((src.bytes = op_bytes)) {
+                                       case 2:
+                                               src.val = src.orig_val = *(uint16_t *) src.ptr;
+                                               break;
+                                       case 4:
+                                               src.val = src.orig_val = *(uint32_t *) src.ptr;
+                                               break;
+                                       case 8:
+                                               src.val = src.orig_val = *(uint64_t *) src.ptr;
+                                               break;
+                               }
                        }
-               }
-               break;
-       case SrcMem16:
-               src.bytes = 2;
-               goto srcmem_common;
-       case SrcMem32:
-               src.bytes = 4;
-               goto srcmem_common;
-       case SrcMem:
-               src.bytes = (d & ByteOp) ? 1 : op_bytes;
-             srcmem_common:
-               src.type = OP_MEM;
-               src.ptr = (unsigned long *)cr2;
-               if ((rc = ops->read_emulated((unsigned long)src.ptr,
-                                            &src.val, src.bytes, ctxt)) != 0)
-                       goto done;
-               src.orig_val = src.val;
-               break;
-       case SrcImm:
-               src.type = OP_IMM;
-               src.ptr = (unsigned long *)_eip;
-               src.bytes = (d & ByteOp) ? 1 : op_bytes;
-               if (src.bytes == 8)
+                       break;
+               case SrcMem16:
+                       src.bytes = 2;
+                       goto srcmem_common;
+               case SrcMem32:
                        src.bytes = 4;
-               /* NB. Immediates are sign-extended as necessary. */
-               switch (src.bytes) {
-               case 1:
-                       src.val = insn_fetch(int8_t, 1, _eip);
+                       goto srcmem_common;
+               case SrcMem:
+                       src.bytes = (d & ByteOp) ? 1 : op_bytes;
+srcmem_common:
+                       src.type = OP_MEM;
+                       src.ptr = (unsigned long *)cr2;
+                       if ((rc = ops->read_emulated((unsigned long)src.ptr,
+                                                                                &src.val, src.bytes, ctxt)) != 0)
+                               goto done;
+                       src.orig_val = src.val;
                        break;
-               case 2:
-                       src.val = insn_fetch(int16_t, 2, _eip);
+               case SrcImm:
+                       src.type = OP_IMM;
+                       src.ptr = (unsigned long *)_eip;
+                       src.bytes = (d & ByteOp) ? 1 : op_bytes;
+                       if (src.bytes == 8)
+                               src.bytes = 4;
+                       /* NB. Immediates are sign-extended as necessary. */
+                       switch (src.bytes) {
+                               case 1:
+                                       src.val = insn_fetch(int8_t, 1, _eip);
+                                       break;
+                               case 2:
+                                       src.val = insn_fetch(int16_t, 2, _eip);
+                                       break;
+                               case 4:
+                                       src.val = insn_fetch(int32_t, 4, _eip);
+                                       break;
+                       }
                        break;
-               case 4:
-                       src.val = insn_fetch(int32_t, 4, _eip);
+               case SrcImmByte:
+                       src.type = OP_IMM;
+                       src.ptr = (unsigned long *)_eip;
+                       src.bytes = 1;
+                       src.val = insn_fetch(int8_t, 1, _eip);
                        break;
-               }
-               break;
-       case SrcImmByte:
-               src.type = OP_IMM;
-               src.ptr = (unsigned long *)_eip;
-               src.bytes = 1;
-               src.val = insn_fetch(int8_t, 1, _eip);
-               break;
        }
 
        if (twobyte)
                goto twobyte_insn;
 
        switch (b) {
-       case 0x00 ... 0x05:
-             add:              /* add */
-               emulate_2op_SrcV("add", src, dst, _eflags);
-               break;
-       case 0x08 ... 0x0d:
-             or:               /* or */
-               emulate_2op_SrcV("or", src, dst, _eflags);
-               break;
-       case 0x10 ... 0x15:
-             adc:              /* adc */
-               emulate_2op_SrcV("adc", src, dst, _eflags);
-               break;
-       case 0x18 ... 0x1d:
-             sbb:              /* sbb */
-               emulate_2op_SrcV("sbb", src, dst, _eflags);
-               break;
-       case 0x20 ... 0x25:
-             and:              /* and */
-               emulate_2op_SrcV("and", src, dst, _eflags);
-               break;
-       case 0x28 ... 0x2d:
-             sub:              /* sub */
-               emulate_2op_SrcV("sub", src, dst, _eflags);
-               break;
-       case 0x30 ... 0x35:
-             xor:              /* xor */
-               emulate_2op_SrcV("xor", src, dst, _eflags);
-               break;
-       case 0x38 ... 0x3d:
-             cmp:              /* cmp */
-               emulate_2op_SrcV("cmp", src, dst, _eflags);
-               break;
-       case 0x63:              /* movsxd */
-               if (mode != X86EMUL_MODE_PROT64)
-                       goto cannot_emulate;
-               dst.val = (int32_t) src.val;
-               break;
-       case 0x80 ... 0x83:     /* Grp1 */
-               switch (modrm_reg) {
-               case 0:
-                       goto add;
-               case 1:
-                       goto or;
-               case 2:
-                       goto adc;
-               case 3:
-                       goto sbb;
-               case 4:
-                       goto and;
-               case 5:
-                       goto sub;
-               case 6:
-                       goto xor;
-               case 7:
-                       goto cmp;
-               }
-               break;
-       case 0x84 ... 0x85:
-             test:             /* test */
-               emulate_2op_SrcV("test", src, dst, _eflags);
-               break;
-       case 0x86 ... 0x87:     /* xchg */
-               /* Write back the register source. */
-               switch (dst.bytes) {
-               case 1:
-                       *(uint8_t *) src.ptr = (uint8_t) dst.val;
+               case 0x00 ... 0x05:
+add:   /* add */
+                       emulate_2op_SrcV("add", src, dst, _eflags);
                        break;
-               case 2:
-                       *(uint16_t *) src.ptr = (uint16_t) dst.val;
+               case 0x08 ... 0x0d:
+or:    /* or */
+                       emulate_2op_SrcV("or", src, dst, _eflags);
                        break;
-               case 4:
-                       *src.ptr = (uint32_t) dst.val;
-                       break;  /* 64b reg: zero-extend */
-               case 8:
-                       *src.ptr = dst.val;
+               case 0x10 ... 0x15:
+adc:   /* adc */
+                       emulate_2op_SrcV("adc", src, dst, _eflags);
                        break;
-               }
-               /*
-                * Write back the memory destination with implicit LOCK
-                * prefix.
-                */
-               dst.val = src.val;
-               lock_prefix = 1;
-               break;
-       case 0xa0 ... 0xa1:     /* mov */
-               dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
-               dst.val = src.val;
-               _eip += ad_bytes;       /* skip src displacement */
-               break;
-       case 0xa2 ... 0xa3:     /* mov */
-               dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
-               _eip += ad_bytes;       /* skip dst displacement */
-               break;
-       case 0x88 ... 0x8b:     /* mov */
-       case 0xc6 ... 0xc7:     /* mov (sole member of Grp11) */
-               dst.val = src.val;
-               break;
-       case 0x8f:              /* pop (sole member of Grp1a) */
-               /* 64-bit mode: POP always pops a 64-bit operand. */
-               if (mode == X86EMUL_MODE_PROT64)
-                       dst.bytes = 8;
-               if ((rc = ops->read_std(register_address(ctxt->ss_base,
-                                                        _regs[VCPU_REGS_RSP]),
-                                       &dst.val, dst.bytes, ctxt)) != 0)
-                       goto done;
-               register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes);
-               break;
-       case 0xc0 ... 0xc1:
-             grp2:             /* Grp2 */
-               switch (modrm_reg) {
-               case 0: /* rol */
-                       emulate_2op_SrcB("rol", src, dst, _eflags);
+               case 0x18 ... 0x1d:
+sbb:   /* sbb */
+                       emulate_2op_SrcV("sbb", src, dst, _eflags);
                        break;
-               case 1: /* ror */
-                       emulate_2op_SrcB("ror", src, dst, _eflags);
+               case 0x20 ... 0x25:
+and:   /* and */
+                       emulate_2op_SrcV("and", src, dst, _eflags);
                        break;
-               case 2: /* rcl */
-                       emulate_2op_SrcB("rcl", src, dst, _eflags);
+               case 0x28 ... 0x2d:
+sub:   /* sub */
+                       emulate_2op_SrcV("sub", src, dst, _eflags);
                        break;
-               case 3: /* rcr */
-                       emulate_2op_SrcB("rcr", src, dst, _eflags);
+               case 0x30 ... 0x35:
+xor:   /* xor */
+                       emulate_2op_SrcV("xor", src, dst, _eflags);
                        break;
-               case 4: /* sal/shl */
-               case 6: /* sal/shl */
-                       emulate_2op_SrcB("sal", src, dst, _eflags);
+               case 0x38 ... 0x3d:
+cmp:   /* cmp */
+                       emulate_2op_SrcV("cmp", src, dst, _eflags);
                        break;
-               case 5: /* shr */
-                       emulate_2op_SrcB("shr", src, dst, _eflags);
+               case 0x63:      /* movsxd */
+                       if (mode != X86EMUL_MODE_PROT64)
+                               goto cannot_emulate;
+                       dst.val = (int32_t) src.val;
+                       break;
+               case 0x80 ... 0x83:     /* Grp1 */
+                       switch (modrm_reg) {
+                               case 0:
+                                       goto add;
+                               case 1:
+                                       goto or;
+                               case 2:
+                                       goto adc;
+                               case 3:
+                                       goto sbb;
+                               case 4:
+                                       goto and;
+                               case 5:
+                                       goto sub;
+                               case 6:
+                                       goto xor;
+                               case 7:
+                                       goto cmp;
+                       }
                        break;
-               case 7: /* sar */
-                       emulate_2op_SrcB("sar", src, dst, _eflags);
+               case 0x84 ... 0x85:
+test:  /* test */
+                       emulate_2op_SrcV("test", src, dst, _eflags);
                        break;
-               }
-               break;
-       case 0xd0 ... 0xd1:     /* Grp2 */
-               src.val = 1;
-               goto grp2;
-       case 0xd2 ... 0xd3:     /* Grp2 */
-               src.val = _regs[VCPU_REGS_RCX];
-               goto grp2;
-       case 0xf6 ... 0xf7:     /* Grp3 */
-               switch (modrm_reg) {
-               case 0 ... 1:   /* test */
+               case 0x86 ... 0x87:     /* xchg */
+                       /* Write back the register source. */
+                       switch (dst.bytes) {
+                               case 1:
+                                       *(uint8_t *) src.ptr = (uint8_t) dst.val;
+                                       break;
+                               case 2:
+                                       *(uint16_t *) src.ptr = (uint16_t) dst.val;
+                                       break;
+                               case 4:
+                                       *src.ptr = (uint32_t) dst.val;
+                                       break;  /* 64b reg: zero-extend */
+                               case 8:
+                                       *src.ptr = dst.val;
+                                       break;
+                       }
                        /*
-                        * Special case in Grp3: test has an immediate
-                        * source operand.
+                        * Write back the memory destination with implicit LOCK
+                        * prefix.
                         */
-                       src.type = OP_IMM;
-                       src.ptr = (unsigned long *)_eip;
-                       src.bytes = (d & ByteOp) ? 1 : op_bytes;
-                       if (src.bytes == 8)
-                               src.bytes = 4;
-                       switch (src.bytes) {
-                       case 1:
-                               src.val = insn_fetch(int8_t, 1, _eip);
-                               break;
-                       case 2:
-                               src.val = insn_fetch(int16_t, 2, _eip);
-                               break;
-                       case 4:
-                               src.val = insn_fetch(int32_t, 4, _eip);
-                               break;
-                       }
-                       goto test;
-               case 2: /* not */
-                       dst.val = ~dst.val;
+                       dst.val = src.val;
+                       lock_prefix = 1;
                        break;
-               case 3: /* neg */
-                       emulate_1op("neg", dst, _eflags);
+               case 0xa0 ... 0xa1:     /* mov */
+                       dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+                       dst.val = src.val;
+                       _eip += ad_bytes;       /* skip src displacement */
                        break;
-               default:
-                       goto cannot_emulate;
-               }
-               break;
-       case 0xfe ... 0xff:     /* Grp4/Grp5 */
-               switch (modrm_reg) {
-               case 0: /* inc */
-                       emulate_1op("inc", dst, _eflags);
+               case 0xa2 ... 0xa3:     /* mov */
+                       dst.val = (unsigned long)_regs[VCPU_REGS_RAX];
+                       _eip += ad_bytes;       /* skip dst displacement */
                        break;
-               case 1: /* dec */
-                       emulate_1op("dec", dst, _eflags);
+               case 0x88 ... 0x8b:     /* mov */
+               case 0xc6 ... 0xc7:     /* mov (sole member of Grp11) */
+                       dst.val = src.val;
                        break;
-               case 6: /* push */
-                       /* 64-bit mode: PUSH always pushes a 64-bit operand. */
-                       if (mode == X86EMUL_MODE_PROT64) {
+               case 0x8f:      /* pop (sole member of Grp1a) */
+                       /* 64-bit mode: POP always pops a 64-bit operand. */
+                       if (mode == X86EMUL_MODE_PROT64)
                                dst.bytes = 8;
-                               if ((rc = ops->read_std((unsigned long)dst.ptr,
-                                                       &dst.val, 8,
-                                                       ctxt)) != 0)
-                                       goto done;
-                       }
-                       register_address_increment(_regs[VCPU_REGS_RSP],
-                                                  -dst.bytes);
-                       if ((rc = ops->write_std(
-                                    register_address(ctxt->ss_base,
-                                                     _regs[VCPU_REGS_RSP]),
-                                    dst.val, dst.bytes, ctxt)) != 0)
+                       if ((rc = ops->read_std(register_address(ctxt->ss_base,
+                                                                                                        _regs[VCPU_REGS_RSP]),
+                                                                       &dst.val, dst.bytes, ctxt)) != 0)
                                goto done;
-                       dst.val = dst.orig_val; /* skanky: disable writeback */
+                       register_address_increment(_regs[VCPU_REGS_RSP], dst.bytes);
+                       break;
+               case 0xc0 ... 0xc1:
+grp2:  /* Grp2 */
+                       switch (modrm_reg) {
+                               case 0: /* rol */
+                                       emulate_2op_SrcB("rol", src, dst, _eflags);
+                                       break;
+                               case 1: /* ror */
+                                       emulate_2op_SrcB("ror", src, dst, _eflags);
+                                       break;
+                               case 2: /* rcl */
+                                       emulate_2op_SrcB("rcl", src, dst, _eflags);
+                                       break;
+                               case 3: /* rcr */
+                                       emulate_2op_SrcB("rcr", src, dst, _eflags);
+                                       break;
+                               case 4: /* sal/shl */
+                               case 6: /* sal/shl */
+                                       emulate_2op_SrcB("sal", src, dst, _eflags);
+                                       break;
+                               case 5: /* shr */
+                                       emulate_2op_SrcB("shr", src, dst, _eflags);
+                                       break;
+                               case 7: /* sar */
+                                       emulate_2op_SrcB("sar", src, dst, _eflags);
+                                       break;
+                       }
+                       break;
+               case 0xd0 ... 0xd1:     /* Grp2 */
+                       src.val = 1;
+                       goto grp2;
+               case 0xd2 ... 0xd3:     /* Grp2 */
+                       src.val = _regs[VCPU_REGS_RCX];
+                       goto grp2;
+               case 0xf6 ... 0xf7:     /* Grp3 */
+                       switch (modrm_reg) {
+                               case 0 ... 1:   /* test */
+                                       /*
+                                        * Special case in Grp3: test has an immediate
+                                        * source operand.
+                                        */
+                                       src.type = OP_IMM;
+                                       src.ptr = (unsigned long *)_eip;
+                                       src.bytes = (d & ByteOp) ? 1 : op_bytes;
+                                       if (src.bytes == 8)
+                                               src.bytes = 4;
+                                       switch (src.bytes) {
+                                               case 1:
+                                                       src.val = insn_fetch(int8_t, 1, _eip);
+                                                       break;
+                                               case 2:
+                                                       src.val = insn_fetch(int16_t, 2, _eip);
+                                                       break;
+                                               case 4:
+                                                       src.val = insn_fetch(int32_t, 4, _eip);
+                                                       break;
+                                       }
+                                       goto test;
+                               case 2: /* not */
+                                       dst.val = ~dst.val;
+                                       break;
+                               case 3: /* neg */
+                                       emulate_1op("neg", dst, _eflags);
+                                       break;
+                               default:
+                                       goto cannot_emulate;
+                       }
+                       break;
+               case 0xfe ... 0xff:     /* Grp4/Grp5 */
+                       switch (modrm_reg) {
+                               case 0: /* inc */
+                                       emulate_1op("inc", dst, _eflags);
+                                       break;
+                               case 1: /* dec */
+                                       emulate_1op("dec", dst, _eflags);
+                                       break;
+                               case 6: /* push */
+                                       /* 64-bit mode: PUSH always pushes a 64-bit operand. */
+                                       if (mode == X86EMUL_MODE_PROT64) {
+                                               dst.bytes = 8;
+                                               if ((rc = ops->read_std((unsigned long)dst.ptr,
+                                                                                               &dst.val, 8, ctxt)) != 0)
+                                                       goto done;
+                                       }
+                                       register_address_increment(_regs[VCPU_REGS_RSP],
+                                                                                          -dst.bytes);
+                                       if ((rc = ops->write_std(register_address(ctxt->ss_base,
+                                                                                                                         _regs
+                                                                                                                         [VCPU_REGS_RSP]),
+                                                                                        dst.val, dst.bytes, ctxt)) != 0)
+                                               goto done;
+                                       dst.val = dst.orig_val; /* skanky: disable writeback */
+                                       break;
+                               default:
+                                       goto cannot_emulate;
+                       }
                        break;
-               default:
-                       goto cannot_emulate;
-               }
-               break;
        }
 
 writeback:
        if ((d & Mov) || (dst.orig_val != dst.val)) {
                switch (dst.type) {
-               case OP_REG:
-                       /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
-                       switch (dst.bytes) {
-                       case 1:
-                               *(uint8_t *)dst.ptr = (uint8_t)dst.val;
-                               break;
-                       case 2:
-                               *(uint16_t *)dst.ptr = (uint16_t)dst.val;
+                       case OP_REG:
+                               /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+                               switch (dst.bytes) {
+                                       case 1:
+                                               *(uint8_t *) dst.ptr = (uint8_t) dst.val;
+                                               break;
+                                       case 2:
+                                               *(uint16_t *) dst.ptr = (uint16_t) dst.val;
+                                               break;
+                                       case 4:
+                                               *dst.ptr = (uint32_t) dst.val;
+                                               break;  /* 64b: zero-ext */
+                                       case 8:
+                                               *dst.ptr = dst.val;
+                                               break;
+                               }
                                break;
-                       case 4:
-                               *dst.ptr = (uint32_t)dst.val;
-                               break;  /* 64b: zero-ext */
-                       case 8:
-                               *dst.ptr = dst.val;
+                       case OP_MEM:
+                               if (lock_prefix)
+                                       rc = ops->cmpxchg_emulated((unsigned long)dst.ptr,
+                                                                                          dst.orig_val, dst.val, dst.bytes,
+                                                                                          ctxt);
+                               else
+                                       rc = ops->write_emulated((unsigned long)dst.ptr,
+                                                                                        dst.val, dst.bytes, ctxt);
+                               if (rc != 0)
+                                       goto done;
+                       default:
                                break;
-                       }
-                       break;
-               case OP_MEM:
-                       if (lock_prefix)
-                               rc = ops->cmpxchg_emulated((unsigned long)dst.
-                                                          ptr, dst.orig_val,
-                                                          dst.val, dst.bytes,
-                                                          ctxt);
-                       else
-                               rc = ops->write_emulated((unsigned long)dst.ptr,
-                                                        dst.val, dst.bytes,
-                                                        ctxt);
-                       if (rc != 0)
-                               goto done;
-               default:
-                       break;
                }
        }
 
@@ -1096,173 +1089,180 @@ special_insn:
                _eip = ctxt->vcpu->rip;
        }
        switch (b) {
-       case 0xa4 ... 0xa5:     /* movs */
-               dst.type = OP_MEM;
-               dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-               dst.ptr = (unsigned long *)register_address(ctxt->es_base,
-                                                       _regs[VCPU_REGS_RDI]);
-               if ((rc = ops->read_emulated(register_address(
-                     override_base ? *override_base : ctxt->ds_base,
-                     _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes, ctxt)) != 0)
-                       goto done;
-               register_address_increment(_regs[VCPU_REGS_RSI],
-                            (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
-               register_address_increment(_regs[VCPU_REGS_RDI],
-                            (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
-               break;
-       case 0xa6 ... 0xa7:     /* cmps */
-               DPRINTF("Urk! I don't handle CMPS.\n");
-               goto cannot_emulate;
-       case 0xaa ... 0xab:     /* stos */
-               dst.type = OP_MEM;
-               dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-               dst.ptr = (unsigned long *)cr2;
-               dst.val = _regs[VCPU_REGS_RAX];
-               register_address_increment(_regs[VCPU_REGS_RDI],
-                            (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
-               break;
-       case 0xac ... 0xad:     /* lods */
-               dst.type = OP_REG;
-               dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-               dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
-               if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes, ctxt)) != 0)
-                       goto done;
-               register_address_increment(_regs[VCPU_REGS_RSI],
-                          (_eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
-               break;
-       case 0xae ... 0xaf:     /* scas */
-               DPRINTF("Urk! I don't handle SCAS.\n");
-               goto cannot_emulate;
+               case 0xa4 ... 0xa5:     /* movs */
+                       dst.type = OP_MEM;
+                       dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+                       dst.ptr = (unsigned long *)register_address(ctxt->es_base,
+                                                                                                               _regs[VCPU_REGS_RDI]);
+                       if ((rc =
+                                ops->
+                                read_emulated(register_address
+                                                          (override_base ? *override_base : ctxt->ds_base,
+                                                               _regs[VCPU_REGS_RSI]), &dst.val, dst.bytes,
+                                                          ctxt)) != 0)
+                               goto done;
+                       register_address_increment(_regs[VCPU_REGS_RSI],
+                                                                          (_eflags & EFLG_DF) ? -dst.bytes : dst.
+                                                                          bytes);
+                       register_address_increment(_regs[VCPU_REGS_RDI],
+                                                                          (_eflags & EFLG_DF) ? -dst.bytes : dst.
+                                                                          bytes);
+                       break;
+               case 0xa6 ... 0xa7:     /* cmps */
+                       DPRINTF("Urk! I don't handle CMPS.\n");
+                       goto cannot_emulate;
+               case 0xaa ... 0xab:     /* stos */
+                       dst.type = OP_MEM;
+                       dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+                       dst.ptr = (unsigned long *)cr2;
+                       dst.val = _regs[VCPU_REGS_RAX];
+                       register_address_increment(_regs[VCPU_REGS_RDI],
+                                                                          (_eflags & EFLG_DF) ? -dst.bytes : dst.
+                                                                          bytes);
+                       break;
+               case 0xac ... 0xad:     /* lods */
+                       dst.type = OP_REG;
+                       dst.bytes = (d & ByteOp) ? 1 : op_bytes;
+                       dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+                       if ((rc = ops->read_emulated(cr2, &dst.val, dst.bytes, ctxt)) != 0)
+                               goto done;
+                       register_address_increment(_regs[VCPU_REGS_RSI],
+                                                                          (_eflags & EFLG_DF) ? -dst.bytes : dst.
+                                                                          bytes);
+                       break;
+               case 0xae ... 0xaf:     /* scas */
+                       DPRINTF("Urk! I don't handle SCAS.\n");
+                       goto cannot_emulate;
        }
        goto writeback;
 
 twobyte_insn:
        switch (b) {
-       case 0x01: /* lgdt, lidt, lmsw */
-               switch (modrm_reg) {
-                       uint16_t size;
-                       unsigned long address;
+               case 0x01:      /* lgdt, lidt, lmsw */
+                       switch (modrm_reg) {
+                                       uint16_t size;
+                                       unsigned long address;
 
-               case 2: /* lgdt */
-                       rc = read_descriptor(ctxt, ops, src.ptr,
-                                            &size, &address, op_bytes);
-                       if (rc)
-                               goto done;
+                               case 2: /* lgdt */
+                                       rc = read_descriptor(ctxt, ops, src.ptr,
+                                                                                &size, &address, op_bytes);
+                                       if (rc)
+                                               goto done;
 #warning "implement realmode_lgdt"
-                       //realmode_lgdt(ctxt->vcpu, size, address);
-                       break;
-               case 3: /* lidt */
-                       rc = read_descriptor(ctxt, ops, src.ptr,
-                                            &size, &address, op_bytes);
-                       if (rc)
-                               goto done;
+                                       //realmode_lgdt(ctxt->vcpu, size, address);
+                                       break;
+                               case 3: /* lidt */
+                                       rc = read_descriptor(ctxt, ops, src.ptr,
+                                                                                &size, &address, op_bytes);
+                                       if (rc)
+                                               goto done;
 #warning "implement realmode_lidt"
-                       //realmode_lidt(ctxt->vcpu, size, address);
-                       break;
-               case 6: /* lmsw */
+                                       //realmode_lidt(ctxt->vcpu, size, address);
+                                       break;
+                               case 6: /* lmsw */
 #warning "implement realmod_lmsw"
-                       //realmode_lmsw(ctxt->vcpu, (uint16_t)modrm_val, &_eflags);
+                                       //realmode_lmsw(ctxt->vcpu, (uint16_t)modrm_val, &_eflags);
+                                       break;
+                               default:
+                                       goto cannot_emulate;
+                       }
                        break;
-               default:
-                       goto cannot_emulate;
-               }
-               break;
-       case 0x40 ... 0x4f:     /* cmov */
-               dst.val = dst.orig_val = src.val;
-               d &= ~Mov;      /* default to no move */
-               /*
-                * First, assume we're decoding an even cmov opcode
-                * (lsb == 0).
-                */
-               switch ((b & 15) >> 1) {
-               case 0: /* cmovo */
-                       d |= (_eflags & EFLG_OF) ? Mov : 0;
+               case 0x40 ... 0x4f:     /* cmov */
+                       dst.val = dst.orig_val = src.val;
+                       d &= ~Mov;      /* default to no move */
+                       /*
+                        * First, assume we're decoding an even cmov opcode
+                        * (lsb == 0).
+                        */
+                       switch ((b & 15) >> 1) {
+                               case 0: /* cmovo */
+                                       d |= (_eflags & EFLG_OF) ? Mov : 0;
+                                       break;
+                               case 1: /* cmovb/cmovc/cmovnae */
+                                       d |= (_eflags & EFLG_CF) ? Mov : 0;
+                                       break;
+                               case 2: /* cmovz/cmove */
+                                       d |= (_eflags & EFLG_ZF) ? Mov : 0;
+                                       break;
+                               case 3: /* cmovbe/cmovna */
+                                       d |= (_eflags & (EFLG_CF | EFLG_ZF)) ? Mov : 0;
+                                       break;
+                               case 4: /* cmovs */
+                                       d |= (_eflags & EFLG_SF) ? Mov : 0;
+                                       break;
+                               case 5: /* cmovp/cmovpe */
+                                       d |= (_eflags & EFLG_PF) ? Mov : 0;
+                                       break;
+                               case 7: /* cmovle/cmovng */
+                                       d |= (_eflags & EFLG_ZF) ? Mov : 0;
+                                       /* fall through */
+                               case 6: /* cmovl/cmovnge */
+                                       d |= (!(_eflags & EFLG_SF) !=
+                                                 !(_eflags & EFLG_OF)) ? Mov : 0;
+                                       break;
+                       }
+                       /* Odd cmov opcodes (lsb == 1) have inverted sense. */
+                       d ^= (b & 1) ? Mov : 0;
                        break;
-               case 1: /* cmovb/cmovc/cmovnae */
-                       d |= (_eflags & EFLG_CF) ? Mov : 0;
+               case 0xb0 ... 0xb1:     /* cmpxchg */
+                       /*
+                        * Save real source value, then compare EAX against
+                        * destination.
+                        */
+                       src.orig_val = src.val;
+                       src.val = _regs[VCPU_REGS_RAX];
+                       emulate_2op_SrcV("cmp", src, dst, _eflags);
+                       /* Always write back. The question is: where to? */
+                       d |= Mov;
+                       if (_eflags & EFLG_ZF) {
+                               /* Success: write back to memory. */
+                               dst.val = src.orig_val;
+                       } else {
+                               /* Failure: write the value we saw to EAX. */
+                               dst.type = OP_REG;
+                               dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
+                       }
                        break;
-               case 2: /* cmovz/cmove */
-                       d |= (_eflags & EFLG_ZF) ? Mov : 0;
+               case 0xa3:
+bt:    /* bt */
+                       src.val &= (dst.bytes << 3) - 1;        /* only subword offset */
+                       emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
                        break;
-               case 3: /* cmovbe/cmovna */
-                       d |= (_eflags & (EFLG_CF | EFLG_ZF)) ? Mov : 0;
+               case 0xb3:
+btr:   /* btr */
+                       src.val &= (dst.bytes << 3) - 1;        /* only subword offset */
+                       emulate_2op_SrcV_nobyte("btr", src, dst, _eflags);
                        break;
-               case 4: /* cmovs */
-                       d |= (_eflags & EFLG_SF) ? Mov : 0;
+               case 0xab:
+bts:   /* bts */
+                       src.val &= (dst.bytes << 3) - 1;        /* only subword offset */
+                       emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
                        break;
-               case 5: /* cmovp/cmovpe */
-                       d |= (_eflags & EFLG_PF) ? Mov : 0;
+               case 0xb6 ... 0xb7:     /* movzx */
+                       dst.bytes = op_bytes;
+                       dst.val = (d & ByteOp) ? (uint8_t) src.val : (uint16_t) src.val;
                        break;
-               case 7: /* cmovle/cmovng */
-                       d |= (_eflags & EFLG_ZF) ? Mov : 0;
-                       /* fall through */
-               case 6: /* cmovl/cmovnge */
-                       d |= (!(_eflags & EFLG_SF) !=
-                             !(_eflags & EFLG_OF)) ? Mov : 0;
+               case 0xbb:
+btc:   /* btc */
+                       src.val &= (dst.bytes << 3) - 1;        /* only subword offset */
+                       emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
+                       break;
+               case 0xba:      /* Grp8 */
+                       switch (modrm_reg & 3) {
+                               case 0:
+                                       goto bt;
+                               case 1:
+                                       goto bts;
+                               case 2:
+                                       goto btr;
+                               case 3:
+                                       goto btc;
+                       }
+                       break;
+               case 0xbe ... 0xbf:     /* movsx */
+                       dst.bytes = op_bytes;
+                       dst.val = (d & ByteOp) ? (int8_t) src.val : (int16_t) src.val;
                        break;
-               }
-               /* Odd cmov opcodes (lsb == 1) have inverted sense. */
-               d ^= (b & 1) ? Mov : 0;
-               break;
-       case 0xb0 ... 0xb1:     /* cmpxchg */
-               /*
-                * Save real source value, then compare EAX against
-                * destination.
-                */
-               src.orig_val = src.val;
-               src.val = _regs[VCPU_REGS_RAX];
-               emulate_2op_SrcV("cmp", src, dst, _eflags);
-               /* Always write back. The question is: where to? */
-               d |= Mov;
-               if (_eflags & EFLG_ZF) {
-                       /* Success: write back to memory. */
-                       dst.val = src.orig_val;
-               } else {
-                       /* Failure: write the value we saw to EAX. */
-                       dst.type = OP_REG;
-                       dst.ptr = (unsigned long *)&_regs[VCPU_REGS_RAX];
-               }
-               break;
-       case 0xa3:
-             bt:               /* bt */
-               src.val &= (dst.bytes << 3) - 1; /* only subword offset */
-               emulate_2op_SrcV_nobyte("bt", src, dst, _eflags);
-               break;
-       case 0xb3:
-             btr:              /* btr */
-               src.val &= (dst.bytes << 3) - 1; /* only subword offset */
-               emulate_2op_SrcV_nobyte("btr", src, dst, _eflags);
-               break;
-       case 0xab:
-             bts:              /* bts */
-               src.val &= (dst.bytes << 3) - 1; /* only subword offset */
-               emulate_2op_SrcV_nobyte("bts", src, dst, _eflags);
-               break;
-       case 0xb6 ... 0xb7:     /* movzx */
-               dst.bytes = op_bytes;
-               dst.val = (d & ByteOp) ? (uint8_t) src.val : (uint16_t) src.val;
-               break;
-       case 0xbb:
-             btc:              /* btc */
-               src.val &= (dst.bytes << 3) - 1; /* only subword offset */
-               emulate_2op_SrcV_nobyte("btc", src, dst, _eflags);
-               break;
-       case 0xba:              /* Grp8 */
-               switch (modrm_reg & 3) {
-               case 0:
-                       goto bt;
-               case 1:
-                       goto bts;
-               case 2:
-                       goto btr;
-               case 3:
-                       goto btc;
-               }
-               break;
-       case 0xbe ... 0xbf:     /* movsx */
-               dst.bytes = op_bytes;
-               dst.val = (d & ByteOp) ? (int8_t) src.val : (int16_t) src.val;
-               break;
        }
        goto writeback;
 
@@ -1270,71 +1270,73 @@ twobyte_special_insn:
        /* Disable writeback. */
        dst.orig_val = dst.val;
        switch (b) {
-       case 0x0d:              /* GrpP (prefetch) */
-       case 0x18:              /* Grp16 (prefetch/nop) */
-               break;
-       case 0x20: /* mov cr, reg */
-               b = insn_fetch(uint8_t, 1, _eip);
-               if ((b & 0xc0) != 0xc0)
-                       goto cannot_emulate;
+               case 0x0d:      /* GrpP (prefetch) */
+               case 0x18:      /* Grp16 (prefetch/nop) */
+                       break;
+               case 0x20:      /* mov cr, reg */
+                       b = insn_fetch(uint8_t, 1, _eip);
+                       if ((b & 0xc0) != 0xc0)
+                               goto cannot_emulate;
 #warning "implement realmode_get_cr"
-               //_regs[(b >> 3) & 7] = realmode_get_cr(ctxt->vcpu, b & 7);
-               break;
-       case 0x22: /* mov reg, cr */
-               b = insn_fetch(uint8_t, 1, _eip);
-               if ((b & 0xc0) != 0xc0)
-                       goto cannot_emulate;
+                       //_regs[(b >> 3) & 7] = realmode_get_cr(ctxt->vcpu, b & 7);
+                       break;
+               case 0x22:      /* mov reg, cr */
+                       b = insn_fetch(uint8_t, 1, _eip);
+                       if ((b & 0xc0) != 0xc0)
+                               goto cannot_emulate;
 #warning "implement realmod_set_cr"
-               //realmode_set_cr(ctxt->vcpu, b & 7, _regs[(b >> 3) & 7] & -1u,
-               //              &_eflags);
-               break;
-       case 0xc7:              /* Grp9 (cmpxchg8b) */
+                       //realmode_set_cr(ctxt->vcpu, b & 7, _regs[(b >> 3) & 7] & -1u,
+                       //      &_eflags);
+                       break;
+               case 0xc7:      /* Grp9 (cmpxchg8b) */
 #if defined(__i386__)
-               {
-                       unsigned long old_lo, old_hi;
-                       if (((rc = ops->read_emulated(cr2 + 0, &old_lo, 4,
-                                                     ctxt)) != 0)
-                           || ((rc = ops->read_emulated(cr2 + 4, &old_hi, 4,
-                                                        ctxt)) != 0))
-                               goto done;
-                       if ((old_lo != _regs[VCPU_REGS_RAX])
-                           || (old_hi != _regs[VCPU_REGS_RDI])) {
-                               _regs[VCPU_REGS_RAX] = old_lo;
-                               _regs[VCPU_REGS_RDX] = old_hi;
-                               _eflags &= ~EFLG_ZF;
-                       } else if (ops->cmpxchg8b_emulated == NULL) {
-                               rc = X86EMUL_UNHANDLEABLE;
-                               goto done;
-                       } else {
-                               if ((rc = ops->cmpxchg8b_emulated(cr2, old_lo,
-                                                         old_hi,
-                                                         _regs[VCPU_REGS_RBX],
-                                                         _regs[VCPU_REGS_RCX],
-                                                         ctxt)) != 0)
+                       {
+                               unsigned long old_lo, old_hi;
+                               if (((rc = ops->read_emulated(cr2 + 0, &old_lo, 4, ctxt)) != 0)
+                                       || ((rc = ops->read_emulated(cr2 + 4, &old_hi, 4,
+                                                                                                ctxt)) != 0))
                                        goto done;
-                               _eflags |= EFLG_ZF;
+                               if ((old_lo != _regs[VCPU_REGS_RAX])
+                                       || (old_hi != _regs[VCPU_REGS_RDI])) {
+                                       _regs[VCPU_REGS_RAX] = old_lo;
+                                       _regs[VCPU_REGS_RDX] = old_hi;
+                                       _eflags &= ~EFLG_ZF;
+                               } else if (ops->cmpxchg8b_emulated == NULL) {
+                                       rc = X86EMUL_UNHANDLEABLE;
+                                       goto done;
+                               } else {
+                                       if ((rc = ops->cmpxchg8b_emulated(cr2, old_lo,
+                                                                                                         old_hi,
+                                                                                                         _regs[VCPU_REGS_RBX],
+                                                                                                         _regs[VCPU_REGS_RCX],
+                                                                                                         ctxt)) != 0)
+                                               goto done;
+                                       _eflags |= EFLG_ZF;
+                               }
+                               break;
                        }
-                       break;
-               }
 #elif defined(__x86_64__)
-               {
-                       unsigned long old, new;
-                       if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
-                               goto done;
-                       if (((uint32_t) (old >> 0) != (uint32_t) _regs[VCPU_REGS_RAX]) ||
-                           ((uint32_t) (old >> 32) != (uint32_t) _regs[VCPU_REGS_RDX])) {
-                               _regs[VCPU_REGS_RAX] = (uint32_t) (old >> 0);
-                               _regs[VCPU_REGS_RDX] = (uint32_t) (old >> 32);
-                               _eflags &= ~EFLG_ZF;
-                       } else {
-                               new = (_regs[VCPU_REGS_RCX] << 32) | (uint32_t) _regs[VCPU_REGS_RBX];
-                               if ((rc = ops->cmpxchg_emulated(cr2, old,
-                                                         new, 8, ctxt)) != 0)
+                       {
+                               unsigned long old, new;
+                               if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
                                        goto done;
-                               _eflags |= EFLG_ZF;
+                               if (((uint32_t) (old >> 0) != (uint32_t) _regs[VCPU_REGS_RAX])
+                                       || ((uint32_t) (old >> 32) !=
+                                               (uint32_t) _regs[VCPU_REGS_RDX])) {
+                                       _regs[VCPU_REGS_RAX] = (uint32_t) (old >> 0);
+                                       _regs[VCPU_REGS_RDX] = (uint32_t) (old >> 32);
+                                       _eflags &= ~EFLG_ZF;
+                               } else {
+                                       new =
+                                               (_regs[VCPU_REGS_RCX] << 32) | (uint32_t)
+                                               _regs[VCPU_REGS_RBX];
+                                       if ((rc =
+                                                ops->cmpxchg_emulated(cr2, old, new, 8, ctxt)) != 0)
+                                               goto done;
+                                       _eflags |= EFLG_ZF;
+                               }
+                               break;
                        }
-                       break;
-               }
 #endif
        }
        goto writeback;
@@ -1343,4 +1345,3 @@ cannot_emulate:
        printd("Cannot emulate %02x\n", b);
        return -1;
 }
-
index a9c2da0..eae4c1a 100644 (file)
@@ -48,9 +48,9 @@ struct x86_emulate_ctxt;
 /* Access is unhandleable: bail from emulation and return error to caller. */
 #define X86EMUL_UNHANDLEABLE    1
 /* Terminate emulation but return success to the caller. */
-#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
-#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
-#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+#define X86EMUL_PROPAGATE_FAULT 2      /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR     2      /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED  2      /* cmpxchg did not see expected value */
 struct x86_emulate_ops {
        /*
         * read_std: Read bytes of standard (non-emulated/special) memory.
@@ -59,9 +59,9 @@ struct x86_emulate_ops {
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
-       int (*read_std)(unsigned long addr,
-                       unsigned long *val,
-                       unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+       int (*read_std) (unsigned long addr,
+                                        unsigned long *val,
+                                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -71,9 +71,9 @@ struct x86_emulate_ops {
         *                required).
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
-       int (*write_std)(unsigned long addr,
-                        unsigned long val,
-                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+       int (*write_std) (unsigned long addr,
+                                         unsigned long val,
+                                         unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * read_emulated: Read bytes from emulated/special memory area.
@@ -82,9 +82,8 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_emulated) (unsigned long addr,
-                             unsigned long *val,
-                             unsigned int bytes,
-                             struct x86_emulate_ctxt * ctxt);
+                                                 unsigned long *val,
+                                                 unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * write_emulated: Read bytes from emulated/special memory area.
@@ -94,9 +93,8 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
        int (*write_emulated) (unsigned long addr,
-                              unsigned long val,
-                              unsigned int bytes,
-                              struct x86_emulate_ctxt * ctxt);
+                                                  unsigned long val,
+                                                  unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -107,10 +105,10 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
         */
        int (*cmpxchg_emulated) (unsigned long addr,
-                                unsigned long old,
-                                unsigned long new,
-                                unsigned int bytes,
-                                struct x86_emulate_ctxt * ctxt);
+                                                        unsigned long old,
+                                                        unsigned long new,
+                                                        unsigned int bytes,
+                                                        struct x86_emulate_ctxt * ctxt);
 
        /*
         * cmpxchg8b_emulated: Emulate an atomic (LOCKed) CMPXCHG8B operation on an
@@ -125,11 +123,11 @@ struct x86_emulate_ops {
         *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
         */
        int (*cmpxchg8b_emulated) (unsigned long addr,
-                                  unsigned long old_lo,
-                                  unsigned long old_hi,
-                                  unsigned long new_lo,
-                                  unsigned long new_hi,
-                                  struct x86_emulate_ctxt * ctxt);
+                                                          unsigned long old_lo,
+                                                          unsigned long old_hi,
+                                                          unsigned long new_lo,
+                                                          unsigned long new_hi,
+                                                          struct x86_emulate_ctxt * ctxt);
 };
 
 struct cpu_user_regs;
@@ -172,7 +170,7 @@ struct x86_emulate_ctxt {
  * Returns -1 on failure, 0 on success.
  */
 int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
-                     struct x86_emulate_ops *ops);
+                                         struct x86_emulate_ops *ops);
 
 /*
  * Given the 'reg' portion of a ModRM byte, and a register block, return a
@@ -180,6 +178,6 @@ int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
  */
 void *decode_register(uint8_t modrm_reg, unsigned long *regs,
-                     int highbyte_regs);
+                                         int highbyte_regs);
 
-#endif                         /* __X86_EMULATE_H__ */
+#endif /* __X86_EMULATE_H__ */
index 12d5e6f..72f4424 100644 (file)
@@ -4,24 +4,24 @@
 /* CPU model specific register (MSR) numbers */
 
 /* x86-64 specific MSRs */
-#define MSR_EFER               0xc0000080 /* extended feature register */
-#define MSR_STAR               0xc0000081 /* legacy mode SYSCALL target */
-#define MSR_LSTAR              0xc0000082 /* long mode SYSCALL target */
-#define MSR_CSTAR              0xc0000083 /* compat mode SYSCALL target */
-#define MSR_SYSCALL_MASK       0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE            0xc0000100 /* 64bit FS base */
-#define MSR_GS_BASE            0xc0000101 /* 64bit GS base */
-#define MSR_KERNEL_GS_BASE     0xc0000102 /* SwapGS GS shadow */
-#define MSR_TSC_AUX            0xc0000103 /* Auxiliary TSC */
+#define MSR_EFER               0xc0000080      /* extended feature register */
+#define MSR_STAR               0xc0000081      /* legacy mode SYSCALL target */
+#define MSR_LSTAR              0xc0000082      /* long mode SYSCALL target */
+#define MSR_CSTAR              0xc0000083      /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK       0xc0000084      /* EFLAGS mask for syscall */
+#define MSR_FS_BASE            0xc0000100      /* 64bit FS base */
+#define MSR_GS_BASE            0xc0000101      /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE     0xc0000102      /* SwapGS GS shadow */
+#define MSR_TSC_AUX            0xc0000103      /* Auxiliary TSC */
 
 /* EFER bits: */
-#define _EFER_SCE              0  /* SYSCALL/SYSRET */
-#define _EFER_LME              8  /* Long mode enable */
-#define _EFER_LMA              10 /* Long mode active (read-only) */
-#define _EFER_NX               11 /* No execute enable */
-#define _EFER_SVME             12 /* Enable virtualization */
-#define _EFER_LMSLE            13 /* Long Mode Segment Limit Enable */
-#define _EFER_FFXSR            14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_SCE              0       /* SYSCALL/SYSRET */
+#define _EFER_LME              8       /* Long mode enable */
+#define _EFER_LMA              10      /* Long mode active (read-only) */
+#define _EFER_NX               11      /* No execute enable */
+#define _EFER_SVME             12      /* Enable virtualization */
+#define _EFER_LMSLE            13      /* Long Mode Segment Limit Enable */
+#define _EFER_FFXSR            14      /* Enable Fast FXSAVE/FXRSTOR */
 
 #define EFER_SCE               (1<<_EFER_SCE)
 #define EFER_LME               (1<<_EFER_LME)
@@ -94,8 +94,8 @@
 #define MSR_IA32_LASTINTTOIP           0x000001de
 
 /* DEBUGCTLMSR bits (others vary by model): */
-#define DEBUGCTLMSR_LBR                        (1UL <<  0) /* last branch recording */
-#define DEBUGCTLMSR_BTF                        (1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_LBR                        (1UL <<  0)     /* last branch recording */
+#define DEBUGCTLMSR_BTF                        (1UL <<  1)     /* single-step on branches */
 #define DEBUGCTLMSR_TR                 (1UL <<  6)
 #define DEBUGCTLMSR_BTS                        (1UL <<  7)
 #define DEBUGCTLMSR_BTINT              (1UL <<  8)
 #define MSR_AMD64_IBSOP_REG_MASK       ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
 #define MSR_AMD64_IBSCTL               0xc001103a
 #define MSR_AMD64_IBSBRTARGET          0xc001103b
-#define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_IBS_REG_COUNT_MAX    8       /* includes MSR_AMD64_IBSBRTARGET */
 
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL              0xc0010200
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK                0x18000000
 #define MSR_K8_TSEG_ADDR               0xc0010112
-#define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000 /* MtrrFixDramEn bit    */
-#define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000 /* MtrrFixDramModEn bit */
-#define K8_MTRR_RDMEM_WRMEM_MASK       0x18181818 /* Mask: RdMem|WrMem    */
+#define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000      /* MtrrFixDramEn bit    */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000      /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK       0x18181818      /* Mask: RdMem|WrMem    */
 
 /* K7 MSRs */
 #define MSR_K7_EVNTSEL0                        0xc0010000
 #define MSR_P4_SAAT_ESCR0              0x000003ae
 #define MSR_P4_SAAT_ESCR1              0x000003af
 #define MSR_P4_SSU_ESCR0               0x000003be
-#define MSR_P4_SSU_ESCR1               0x000003bf /* guess: not in manual */
+#define MSR_P4_SSU_ESCR1               0x000003bf      /* guess: not in manual */
 
 #define MSR_P4_TBPU_ESCR0              0x000003c2
 #define MSR_P4_TBPU_ESCR1              0x000003c3
index 498f844..6c4fd23 100644 (file)
@@ -4,29 +4,29 @@
  */
 
 #if PTTYPE == 64
-       #define pt_element_t uint64_t
-       #define guest_walker guest_walker64
-       #define FNAME(name) paging##64_##name
-       #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
-       #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
-       #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
-       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
-       #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
-       #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
-       #define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
+#define pt_element_t uint64_t
+#define guest_walker guest_walker64
+#define FNAME(name) paging##64_##name
+#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
+#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
+#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
+#define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
 #elif PTTYPE == 32
-       #define pt_element_t uint32_t
-       #define guest_walker guest_walker32
-       #define FNAME(name) paging##32_##name
-       #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
-       #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
-       #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
-       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
-       #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
-       #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
-       #define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
+#define pt_element_t uint32_t
+#define guest_walker guest_walker32
+#define FNAME(name) paging##32_##name
+#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
+#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
+#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
+#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
+#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
+#define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
 #else
-       #error Invalid PTTYPE value
+#error Invalid PTTYPE value
 #endif
 
 /*
@@ -39,47 +39,45 @@ struct guest_walker {
        pt_element_t inherited_ar;
 };
 
-static void FNAME(init_walker)(struct guest_walker *walker,
-                              struct litevm_vcpu *vcpu)
-{
+static void FNAME(init_walker) (struct guest_walker * walker,
+                                                               struct litevm_vcpu * vcpu) {
        hpa_t hpa;
        struct litevm_memory_slot *slot;
 
        walker->level = vcpu->mmu.root_level;
        slot = gfn_to_memslot(vcpu->litevm,
-                             (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+                                                 (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
        // well, it seems that stuff is always addressable in akaros. I hope.
        //walker->table = vmap_pmem(ppn2page(hpa >> PAGE_SHIFT), PAGE_SIZE);
        walker->table = KADDR(hpa);
 
        ASSERT((!is_long_mode() && is_pae()) ||
-              (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
+                  (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
 
-       walker->table = (pt_element_t *)( (unsigned long)walker->table |
-               (unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
+       walker->table = (pt_element_t *) ((unsigned long)walker->table |
+                                                                         (unsigned long)(vcpu->
+                                                                                                         cr3 & ~(PAGE_MASK |
+                                                                                                                         CR3_FLAGS_MASK)));
        walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
 }
 
-static void FNAME(release_walker)(struct guest_walker *walker)
-{
+static void FNAME(release_walker) (struct guest_walker * walker) {
        //vunmap_pmem(walker->table, PAGE_SIZE);
 }
 
-static void FNAME(set_pte)(struct litevm_vcpu *vcpu, uint64_t guest_pte,
-                          uint64_t *shadow_pte, uint64_t access_bits)
-{
+static void FNAME(set_pte) (struct litevm_vcpu * vcpu, uint64_t guest_pte,
+                                                       uint64_t * shadow_pte, uint64_t access_bits) {
        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pte;
        *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
        set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-                      guest_pte & PT_DIRTY_MASK, access_bits);
+                                  guest_pte & PT_DIRTY_MASK, access_bits);
 }
 
-static void FNAME(set_pde)(struct litevm_vcpu *vcpu, uint64_t guest_pde,
-                          uint64_t *shadow_pte, uint64_t access_bits,
-                          int index)
-{
+static void FNAME(set_pde) (struct litevm_vcpu * vcpu, uint64_t guest_pde,
+                                                       uint64_t * shadow_pte, uint64_t access_bits,
+                                                       int index) {
        gpa_t gaddr;
 
        ASSERT(*shadow_pte == 0);
@@ -89,34 +87,31 @@ static void FNAME(set_pde)(struct litevm_vcpu *vcpu, uint64_t guest_pde,
                gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
                        (32 - PT32_DIR_PSE36_SHIFT);
        *shadow_pte = (guest_pde & PT_NON_PTE_COPY_MASK) |
-                         ((guest_pde & PT_DIR_PAT_MASK) >>
-                                   (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
+               ((guest_pde & PT_DIR_PAT_MASK) >> (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
        set_pte_common(vcpu, shadow_pte, gaddr,
-                      guest_pde & PT_DIRTY_MASK, access_bits);
+                                  guest_pde & PT_DIRTY_MASK, access_bits);
 }
 
 /*
  * Fetch a guest pte from a specific level in the paging hierarchy.
  */
-static pt_element_t *FNAME(fetch_guest)(struct litevm_vcpu *vcpu,
-                                       struct guest_walker *walker,
-                                       int level,
-                                       gva_t addr)
-{
+static pt_element_t *FNAME(fetch_guest) (struct litevm_vcpu * vcpu,
+                                                                                struct guest_walker * walker,
+                                                                                int level, gva_t addr) {
 
-       ASSERT(level > 0  && level <= walker->level);
+       ASSERT(level > 0 && level <= walker->level);
 
        for (;;) {
                int index = PT_INDEX(addr, walker->level);
                hpa_t paddr;
 
                ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
-                      ((unsigned long)&walker->table[index] & PAGE_MASK));
+                          ((unsigned long)&walker->table[index] & PAGE_MASK));
                if (level == walker->level ||
-                   !is_present_pte(walker->table[index]) ||
-                   (walker->level == PT_DIRECTORY_LEVEL &&
-                    (walker->table[index] & PT_PAGE_SIZE_MASK) &&
-                    (PTTYPE == 64 || is_pse())))
+                       !is_present_pte(walker->table[index]) ||
+                       (walker->level == PT_DIRECTORY_LEVEL &&
+                        (walker->table[index] & PT_PAGE_SIZE_MASK) &&
+                        (PTTYPE == 64 || is_pse())))
                        return &walker->table[index];
                if (walker->level != 3 || is_long_mode())
                        walker->inherited_ar &= walker->table[index];
@@ -131,9 +126,8 @@ static pt_element_t *FNAME(fetch_guest)(struct litevm_vcpu *vcpu,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
-                             struct guest_walker *walker)
-{
+static uint64_t *FNAME(fetch) (struct litevm_vcpu * vcpu, gva_t addr,
+                                                          struct guest_walker * walker) {
        hpa_t shadow_addr;
        int level;
        uint64_t *prev_shadow_ent = NULL;
@@ -141,9 +135,9 @@ static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
 
-       for (; ; level--) {
+       for (;; level--) {
                uint32_t index = SHADOW_PT_INDEX(addr, level);
-               uint64_t *shadow_ent = ((uint64_t *)KADDR(shadow_addr)) + index;
+               uint64_t *shadow_ent = ((uint64_t *) KADDR(shadow_addr)) + index;
                pt_element_t *guest_ent;
 
                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
@@ -156,11 +150,10 @@ static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
 
                if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
                        ASSERT(level == PT32E_ROOT_LEVEL);
-                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
-                                                      PT32_ROOT_LEVEL, addr);
+                       guest_ent = FNAME(fetch_guest) (vcpu, walker,
+                                                                                       PT32_ROOT_LEVEL, addr);
                } else
-                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
-                                                      level, addr);
+                       guest_ent = FNAME(fetch_guest) (vcpu, walker, level, addr);
 
                if (!is_present_pte(*guest_ent))
                        return NULL;
@@ -174,12 +167,13 @@ static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
                        if (walker->level == PT_DIRECTORY_LEVEL) {
                                if (prev_shadow_ent)
                                        *prev_shadow_ent |= PT_SHADOW_PS_MARK;
-                               FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
-                                              walker->inherited_ar,
-                                         PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
+                               FNAME(set_pde) (vcpu, *guest_ent, shadow_ent,
+                                                               walker->inherited_ar,
+                                                               PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
                        } else {
                                ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-                               FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
+                               FNAME(set_pte) (vcpu, *guest_ent, shadow_ent,
+                                                               walker->inherited_ar);
                        }
                        return shadow_ent;
                }
@@ -191,8 +185,7 @@ static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
                        *shadow_ent = shadow_addr |
                                (*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK));
                else {
-                       *shadow_ent = shadow_addr |
-                               (*guest_ent & PT_NON_PTE_COPY_MASK);
+                       *shadow_ent = shadow_addr | (*guest_ent & PT_NON_PTE_COPY_MASK);
                        *shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
                }
                prev_shadow_ent = shadow_ent;
@@ -206,12 +199,10 @@ static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
  * - update the guest pte dirty bit
  * - update our own dirty page tracking structures
  */
-static int FNAME(fix_write_pf)(struct litevm_vcpu *vcpu,
-                              uint64_t *shadow_ent,
-                              struct guest_walker *walker,
-                              gva_t addr,
-                              int user)
-{
+static int FNAME(fix_write_pf) (struct litevm_vcpu * vcpu,
+                                                               uint64_t * shadow_ent,
+                                                               struct guest_walker * walker,
+                                                               gva_t addr, int user) {
        pt_element_t *guest_ent;
        int writable_shadow;
        gfn_t gfn;
@@ -233,13 +224,13 @@ static int FNAME(fix_write_pf)(struct litevm_vcpu *vcpu,
                 * Kernel mode access.  Fail if it's a read-only page and
                 * supervisor write protection is enabled.
                 */
-               if (!writable_shadow) {
-                       if (is_write_protection())
-                               return 0;
-                       *shadow_ent &= ~PT_USER_MASK;
-               }
+       if (!writable_shadow) {
+               if (is_write_protection())
+                       return 0;
+               *shadow_ent &= ~PT_USER_MASK;
+       }
 
-       guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);
+       guest_ent = FNAME(fetch_guest) (vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);
 
        if (!is_present_pte(*guest_ent)) {
                *shadow_ent = 0;
@@ -267,9 +258,8 @@ static int FNAME(fix_write_pf)(struct litevm_vcpu *vcpu,
  *
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise
  */
-static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
-                              uint32_t error_code)
-{
+static int FNAME(page_fault) (struct litevm_vcpu * vcpu, gva_t addr,
+                                                         uint32_t error_code) {
        int write_fault = error_code & PFERR_WRITE_MASK;
        int pte_present = error_code & PFERR_PRESENT_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
@@ -281,11 +271,11 @@ static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
         * Look up the shadow pte for the faulting address.
         */
        for (;;) {
-               FNAME(init_walker)(&walker, vcpu);
-               shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-               if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
+               FNAME(init_walker) (&walker, vcpu);
+               shadow_pte = FNAME(fetch) (vcpu, addr, &walker);
+               if (IS_ERR(shadow_pte)) {       /* must be -ENOMEM */
                        nonpaging_flush(vcpu);
-                       FNAME(release_walker)(&walker);
+                       FNAME(release_walker) (&walker);
                        continue;
                }
                break;
@@ -296,7 +286,7 @@ static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
         */
        if (!shadow_pte) {
                inject_page_fault(vcpu, addr, error_code);
-               FNAME(release_walker)(&walker);
+               FNAME(release_walker) (&walker);
                return 0;
        }
 
@@ -304,12 +294,12 @@ static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
         * Update the shadow pte.
         */
        if (write_fault)
-               fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
-                                           user_fault);
+               fixed = FNAME(fix_write_pf) (vcpu, shadow_pte, &walker, addr,
+                                                                        user_fault);
        else
                fixed = fix_read_pf(shadow_pte);
 
-       FNAME(release_walker)(&walker);
+       FNAME(release_walker) (&walker);
 
        /*
         * mmio: emulate if accessible, otherwise its a guest fault.
@@ -318,8 +308,7 @@ static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
                if (may_access(*shadow_pte, write_fault, user_fault))
                        return 1;
                pgprintk("%s: io work, no access\n", __FUNCTION__);
-               inject_page_fault(vcpu, addr,
-                                 error_code | PFERR_PRESENT_MASK);
+               inject_page_fault(vcpu, addr, error_code | PFERR_PRESENT_MASK);
                return 0;
        }
 
@@ -336,16 +325,14 @@ static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
        return 0;
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct litevm_vcpu *vcpu, gva_t vaddr)
-{
+static gpa_t FNAME(gva_to_gpa) (struct litevm_vcpu * vcpu, gva_t vaddr) {
        struct guest_walker walker;
        pt_element_t guest_pte;
        gpa_t gpa;
 
-       FNAME(init_walker)(&walker, vcpu);
-       guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
-                                       vaddr);
-       FNAME(release_walker)(&walker);
+       FNAME(init_walker) (&walker, vcpu);
+       guest_pte = *FNAME(fetch_guest) (vcpu, &walker, PT_PAGE_TABLE_LEVEL, vaddr);
+       FNAME(release_walker) (&walker);
 
        if (!is_present_pte(guest_pte))
                return UNMAPPED_GVA;
@@ -355,11 +342,13 @@ static gpa_t FNAME(gva_to_gpa)(struct litevm_vcpu *vcpu, gva_t vaddr)
                ASSERT(PTTYPE == 64 || is_pse());
 
                gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
-                       (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));
+                                                                                                        (PT_LEVEL_MASK
+                                                                                                         (PT_PAGE_TABLE_LEVEL) |
+                                                                                                         ~PAGE_MASK));
 
                if (PTTYPE == 32 && is_cpuid_PSE36())
                        gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
-                                       (32 - PT32_DIR_PSE36_SHIFT);
+                               (32 - PT32_DIR_PSE36_SHIFT);
        } else {
                gpa = (guest_pte & PT_BASE_ADDR_MASK);
                gpa |= (vaddr & ~PAGE_MASK);
index b573ab6..2c3a974 100644 (file)
  *  hfn - host frame number
  */
 
-typedef unsigned long  gva_t;
-typedef uint64_t            gpa_t;
-typedef unsigned long  gfn_t;
+typedef unsigned long gva_t;
+typedef uint64_t gpa_t;
+typedef unsigned long gfn_t;
 
-typedef unsigned long  hva_t;
-typedef uint64_t            hpa_t;
-typedef unsigned long  hfn_t;
+typedef unsigned long hva_t;
+typedef uint64_t hpa_t;
+typedef unsigned long hfn_t;
 
 struct litevm_mmu_page {
        LIST_ENTRY(litevm_mmu_page) link;
        hpa_t page_hpa;
-       unsigned long slot_bitmap; /* One bit set per slot which has memory
-                                   * in this shadow page.
-                                   */
-       int global;              /* Set if all ptes in this page are global */
+       unsigned long slot_bitmap;      /* One bit set per slot which has memory
+                                                                * in this shadow page.
+                                                                */
+       int global;                                     /* Set if all ptes in this page are global */
        uint64_t *parent_pte;
 };
 
@@ -108,11 +108,11 @@ struct litevm_vcpu;
  * mode.
  */
 struct litevm_mmu {
-       void (*new_cr3)(struct litevm_vcpu *vcpu);
-       int (*page_fault)(struct litevm_vcpu *vcpu, gva_t gva, uint32_t err);
-       void (*inval_page)(struct litevm_vcpu *vcpu, gva_t gva);
-       void (*free)(struct litevm_vcpu *vcpu);
-       gpa_t (*gva_to_gpa)(struct litevm_vcpu *vcpu, gva_t gva);
+       void (*new_cr3) (struct litevm_vcpu * vcpu);
+       int (*page_fault) (struct litevm_vcpu * vcpu, gva_t gva, uint32_t err);
+       void (*inval_page) (struct litevm_vcpu * vcpu, gva_t gva);
+       void (*free) (struct litevm_vcpu * vcpu);
+        gpa_t(*gva_to_gpa) (struct litevm_vcpu * vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
@@ -150,13 +150,13 @@ struct litevm_vcpu {
        struct litevm *litevm;
        struct vmcs *vmcs;
        qlock_t mutex;
-       int   cpu;
-       int   launched;
-       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
+       int cpu;
+       int launched;
+       unsigned long irq_summary;      /* bit vector: 1 per word in irq_pending */
 #define NR_IRQ_WORDS (256 / BITS_PER_LONG)
        unsigned long irq_pending[NR_IRQ_WORDS];
-       unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-       unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+       unsigned long regs[NR_VCPU_REGS];       /* for rsp: vcpu_load_rsp_rip() */
+       unsigned long rip;                      /* needs vcpu_load_rsp_rip() */
 
        unsigned long cr2;
        unsigned long cr3;
@@ -166,7 +166,7 @@ struct litevm_vcpu {
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;
-       LIST_HEAD(free_pages, litevm_mmu_page) link;
+        LIST_HEAD(free_pages, litevm_mmu_page) link;
        //struct list_head free_pages;
        struct litevm_mmu_page page_header_buf[LITEVM_NUM_MMU_PAGES];
        struct litevm_mmu mmu;
@@ -184,7 +184,7 @@ struct litevm_vcpu {
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
 
-       struct{
+       struct {
                int active;
                uint8_t save_iopl;
                struct {
@@ -201,14 +201,14 @@ struct litevm_memory_slot {
        unsigned long flags;
        struct page **phys_mem;
 //#warning "bitmap is u8. "
-       /*unsigned long*/uint8_t *dirty_bitmap;
+       /*unsigned long */ uint8_t *dirty_bitmap;
 };
 
 struct litevm {
-       spinlock_t lock; /* protects everything except vcpus */
+       spinlock_t lock;                        /* protects everything except vcpus */
        int nmemslots;
        struct litevm_memory_slot memslots[LITEVM_MEMORY_SLOTS];
-       LIST_HEAD(active_mmu_pages, litevm_mmu_page) link;
+        LIST_HEAD(active_mmu_pages, litevm_mmu_page) link;
        //struct list_head active_mmu_pages;
        struct litevm_vcpu vcpus[LITEVM_MAX_VCPUS];
        int memory_config_version;
@@ -242,12 +242,17 @@ void litevm_mmu_slot_remove_write_access(struct litevm *litevm, int slot);
 hpa_t gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
-static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
-hpa_t gva_to_hpa(struct litevm_vcpu *vcpu, gva_t gva);
+static inline int is_error_hpa(hpa_t hpa)
+{
+       return hpa >> HPA_MSB;
+}
+
+hpa_t gva_to_hpa(struct litevm_vcpu * vcpu, gva_t gva);
 
 extern hpa_t bad_page_address;
 
-static inline struct page *gfn_to_page(struct litevm_memory_slot *slot, gfn_t gfn)
+static inline struct page *gfn_to_page(struct litevm_memory_slot *slot,
+                                                                          gfn_t gfn)
 {
        return slot->phys_mem[gfn - slot->base_gfn];
 }
@@ -255,24 +260,22 @@ static inline struct page *gfn_to_page(struct litevm_memory_slot *slot, gfn_t gf
 struct litevm_memory_slot *gfn_to_memslot(struct litevm *litevm, gfn_t gfn);
 void mark_page_dirty(struct litevm *litevm, gfn_t gfn);
 
-void realmode_lgdt(struct litevm_vcpu *vcpu, uint16_t size, unsigned long address);
-void realmode_lidt(struct litevm_vcpu *vcpu, uint16_t size, unsigned long address);
+void realmode_lgdt(struct litevm_vcpu *vcpu, uint16_t size,
+                                  unsigned long address);
+void realmode_lidt(struct litevm_vcpu *vcpu, uint16_t size,
+                                  unsigned long address);
 void realmode_lmsw(struct litevm_vcpu *vcpu, unsigned long msw,
-                  unsigned long *rflags);
+                                  unsigned long *rflags);
 
 unsigned long realmode_get_cr(struct litevm_vcpu *vcpu, int cr);
 void realmode_set_cr(struct litevm_vcpu *vcpu, int cr, unsigned long value,
-                    unsigned long *rflags);
+                                        unsigned long *rflags);
 
 int litevm_read_guest(struct litevm_vcpu *vcpu,
-              gva_t addr,
-              unsigned long size,
-              void *dest);
+                                         gva_t addr, unsigned long size, void *dest);
 
 int litevm_write_guest(struct litevm_vcpu *vcpu,
-               gva_t addr,
-               unsigned long size,
-               void *data);
+                                          gva_t addr, unsigned long size, void *data);
 
 void vmcs_writel(unsigned long field, unsigned long value);
 unsigned long vmcs_readl(unsigned long field);
@@ -292,7 +295,7 @@ static inline uint64_t vmcs_read64(unsigned long field)
 #ifdef __x86_64__
        return vmcs_readl(field);
 #else
-       return vmcs_readl(field) | ((uint64_t)vmcs_readl(field+1) << 32);
+       return vmcs_readl(field) | ((uint64_t) vmcs_readl(field + 1) << 32);
 #endif
 }
 
@@ -341,7 +344,7 @@ static inline int is_paging(void)
 static inline int is_page_fault(uint32_t intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
+                                                INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
@@ -356,7 +359,8 @@ static inline void flush_guest_tlb(struct litevm_vcpu *vcpu)
        vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
 }
 
-static inline int memslot_id(struct litevm *litevm, struct litevm_memory_slot *slot)
+static inline int memslot_id(struct litevm *litevm,
+                                                        struct litevm_memory_slot *slot)
 {
        return slot - litevm->memslots;
 }
@@ -393,14 +397,13 @@ struct litevm_memory_region {
        uint32_t slot;
        uint32_t flags;
        uint64_t guest_phys_addr;
-       uint64_t memory_size; /* bytes */
+       uint64_t memory_size;           /* bytes */
        void *init_data;
 };
 
 /* for litevm_memory_region::flags */
 #define LITEVM_MEM_LOG_DIRTY_PAGES  1UL
 
-
 #define LITEVM_EXIT_TYPE_FAIL_ENTRY 1
 #define LITEVM_EXIT_TYPE_VM_EXIT    2
 
@@ -418,8 +421,8 @@ enum litevm_exit_reason {
 struct litevm_run {
        /* in */
        uint32_t vcpu;
-       uint32_t emulated;  /* skip current instruction */
-       uint32_t mmio_completed; /* mmio request completed */
+       uint32_t emulated;                      /* skip current instruction */
+       uint32_t mmio_completed;        /* mmio request completed */
 
        /* out */
        uint32_t exit_type;
@@ -440,7 +443,7 @@ struct litevm_run {
 #define LITEVM_EXIT_IO_IN  0
 #define LITEVM_EXIT_IO_OUT 1
                        uint8_t direction;
-                       uint8_t size; /* bytes */
+                       uint8_t size;           /* bytes */
                        uint8_t string;
                        uint8_t string_down;
                        uint8_t rep;
@@ -457,9 +460,9 @@ struct litevm_run {
                /* LITEVM_EXIT_MMIO */
                struct {
                        uint64_t phys_addr;
-                       uint8_t  data[8];
+                       uint8_t data[8];
                        uint32_t len;
-                       uint8_t  is_write;
+                       uint8_t is_write;
                } mmio;
        };
 };
@@ -473,7 +476,7 @@ struct litevm_regs {
        /* out (LITEVM_GET_REGS) / in (LITEVM_SET_REGS) */
        uint64_t rax, rbx, rcx, rdx;
        uint64_t rsi, rdi, rsp, rbp;
-       uint64_t r8,  r9,  r10, r11;
+       uint64_t r8, r9, r10, r11;
        uint64_t r12, r13, r14, r15;
        uint64_t rip, rflags;
 };
@@ -482,10 +485,10 @@ struct litevm_segment {
        uint64_t base;
        uint32_t limit;
        uint16_t selector;
-       uint8_t  type;
-       uint8_t  present, dpl, db, s, l, g, avl;
-       uint8_t  unusable;
-       uint8_t  padding;
+       uint8_t type;
+       uint8_t present, dpl, db, s, l, g, avl;
+       uint8_t unusable;
+       uint8_t padding;
 };
 
 struct litevm_dtable {
@@ -522,9 +525,9 @@ struct litevm_translation {
 
        /* out */
        uint64_t physical_address;
-       uint8_t  valid;
-       uint8_t  writeable;
-       uint8_t  usermode;
+       uint8_t valid;
+       uint8_t writeable;
+       uint8_t usermode;
 };
 
 /* for LITEVM_INTERRUPT */
@@ -554,7 +557,7 @@ struct litevm_dirty_log {
        uint32_t slot;
        uint32_t padding;
        union {
-               void *dirty_bitmap; /* one bit per page */
+               void *dirty_bitmap;             /* one bit per page */
                uint64_t paddingw;
        };
 };
@@ -577,6 +580,6 @@ struct litevm *vmx_open(void);
 int vmx_create_vcpu(struct litevm *litevm, int n);
 int vmx_init(void);
 int vm_set_memory_region(struct litevm *litevm,
-                        struct litevm_memory_region *mem);
+                                                struct litevm_memory_region *mem);
 int vm_run(struct litevm *litevm, struct litevm_run *litevm_run);
 #endif
index 30b6548..49e5ae4 100644 (file)
@@ -48,9 +48,9 @@ struct x86_emulate_ctxt;
 /* Access is unhandleable: bail from emulation and return error to caller. */
 #define X86EMUL_UNHANDLEABLE    1
 /* Terminate emulation but return success to the caller. */
-#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
-#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
-#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+#define X86EMUL_PROPAGATE_FAULT 2      /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR     2      /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED  2      /* cmpxchg did not see expected value */
 struct x86_emulate_ops {
        /*
         * read_std: Read bytes of standard (non-emulated/special) memory.
@@ -59,9 +59,9 @@ struct x86_emulate_ops {
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
-       int (*read_std)(unsigned long addr,
-                       unsigned long *val,
-                       unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+       int (*read_std) (unsigned long addr,
+                                        unsigned long *val,
+                                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -71,9 +71,9 @@ struct x86_emulate_ops {
         *                required).
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
-       int (*write_std)(unsigned long addr,
-                        unsigned long val,
-                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+       int (*write_std) (unsigned long addr,
+                                         unsigned long val,
+                                         unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * read_emulated: Read bytes from emulated/special memory area.
@@ -82,9 +82,8 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_emulated) (unsigned long addr,
-                             unsigned long *val,
-                             unsigned int bytes,
-                             struct x86_emulate_ctxt * ctxt);
+                                                 unsigned long *val,
+                                                 unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * write_emulated: Read bytes from emulated/special memory area.
@@ -94,9 +93,8 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
        int (*write_emulated) (unsigned long addr,
-                              unsigned long val,
-                              unsigned int bytes,
-                              struct x86_emulate_ctxt * ctxt);
+                                                  unsigned long val,
+                                                  unsigned int bytes, struct x86_emulate_ctxt * ctxt);
 
        /*
         * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -107,10 +105,10 @@ struct x86_emulate_ops {
         *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
         */
        int (*cmpxchg_emulated) (unsigned long addr,
-                                unsigned long old,
-                                unsigned long new,
-                                unsigned int bytes,
-                                struct x86_emulate_ctxt * ctxt);
+                                                        unsigned long old,
+                                                        unsigned long new,
+                                                        unsigned int bytes,
+                                                        struct x86_emulate_ctxt * ctxt);
 
        /*
         * cmpxchg8b_emulated: Emulate an atomic (LOCKed) CMPXCHG8B operation on an
@@ -125,11 +123,11 @@ struct x86_emulate_ops {
         *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
         */
        int (*cmpxchg8b_emulated) (unsigned long addr,
-                                  unsigned long old_lo,
-                                  unsigned long old_hi,
-                                  unsigned long new_lo,
-                                  unsigned long new_hi,
-                                  struct x86_emulate_ctxt * ctxt);
+                                                          unsigned long old_lo,
+                                                          unsigned long old_hi,
+                                                          unsigned long new_lo,
+                                                          unsigned long new_hi,
+                                                          struct x86_emulate_ctxt * ctxt);
 };
 
 struct cpu_user_regs;
@@ -172,14 +170,13 @@ struct x86_emulate_ctxt {
  * Returns -1 on failure, 0 on success.
  */
 int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
-                     struct x86_emulate_ops *ops);
+                                         struct x86_emulate_ops *ops);
 
 /*
  * Given the 'reg' portion of a ModRM byte, and a register block, return a
  * pointer into the block that addresses the relevant register.
  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
  */
-void *decode_register(u8 modrm_reg, unsigned long *regs,
-                     int highbyte_regs);
+void *decode_register(u8 modrm_reg, unsigned long *regs, int highbyte_regs);
 
-#endif                         /* __X86_EMULATE_H__ */
+#endif /* __X86_EMULATE_H__ */
index 76daa4d..56aafc0 100644 (file)
@@ -43,16 +43,17 @@ static struct litevm_stats_debugfs_item {
        const char *name;
        uint32_t *data;
 } debugfs_entries[] = {
-       { "pf_fixed", &litevm_stat.pf_fixed },
-       { "pf_guest", &litevm_stat.pf_guest },
-       { "tlb_flush", &litevm_stat.tlb_flush },
-       { "invlpg", &litevm_stat.invlpg },
-       { "exits", &litevm_stat.exits },
-       { "io_exits", &litevm_stat.io_exits },
-       { "mmio_exits", &litevm_stat.mmio_exits },
-       { "signal_exits", &litevm_stat.signal_exits },
-       { "irq_exits", &litevm_stat.irq_exits },
-       { 0, 0 }
+       {
+       "pf_fixed", &litevm_stat.pf_fixed}, {
+       "pf_guest", &litevm_stat.pf_guest}, {
+       "tlb_flush", &litevm_stat.tlb_flush}, {
+       "invlpg", &litevm_stat.invlpg}, {
+       "exits", &litevm_stat.exits}, {
+       "io_exits", &litevm_stat.io_exits}, {
+       "mmio_exits", &litevm_stat.mmio_exits}, {
+       "signal_exits", &litevm_stat.signal_exits}, {
+       "irq_exits", &litevm_stat.irq_exits}, {
+       0, 0}
 };
 
 static struct dentry *debugfs_dir;
@@ -61,8 +62,9 @@ static const uint32_t vmx_msr_index[] = {
 #ifdef __x86_64__
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
-       MSR_EFER, // wtf? MSR_K6_STAR,
+       MSR_EFER,       // wtf? MSR_K6_STAR,
 };
+
 #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
 
 #ifdef __x86_64__
@@ -110,14 +112,14 @@ static const uint32_t vmx_msr_index[] = {
 static inline unsigned long __ffs(unsigned long word)
 {
        print_func_entry();
-       asm("rep; bsf %1,%0"
-               : "=r" (word)
-               : "rm" (word));
+asm("rep; bsf %1,%0":"=r"(word)
+:              "rm"(word));
        print_func_exit();
        return word;
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct litevm_vcpu *vcpu, uint32_t msr)
+static struct vmx_msr_entry *find_msr_entry(struct litevm_vcpu *vcpu,
+                                                                                       uint32_t msr)
 {
        print_func_entry();
        int i;
@@ -134,19 +136,19 @@ static struct vmx_msr_entry *find_msr_entry(struct litevm_vcpu *vcpu, uint32_t m
 struct descriptor_table {
        uint16_t limit;
        unsigned long base;
-} __attribute__((packed));
+} __attribute__ ((packed));
 
 static void get_gdt(struct descriptor_table *table)
 {
        print_func_entry();
-       asm ("sgdt %0" : "=m"(*table));
+asm("sgdt %0":"=m"(*table));
        print_func_exit();
 }
 
 static void get_idt(struct descriptor_table *table)
 {
        print_func_entry();
-       asm ("sidt %0" : "=m"(*table));
+asm("sidt %0":"=m"(*table));
        print_func_exit();
 }
 
@@ -154,7 +156,7 @@ static uint16_t read_fs(void)
 {
        print_func_entry();
        uint16_t seg;
-       asm ("mov %%fs, %0" : "=g"(seg));
+asm("mov %%fs, %0":"=g"(seg));
        print_func_exit();
        return seg;
 }
@@ -163,7 +165,7 @@ static uint16_t read_gs(void)
 {
        print_func_entry();
        uint16_t seg;
-       asm ("mov %%gs, %0" : "=g"(seg));
+asm("mov %%gs, %0":"=g"(seg));
        print_func_exit();
        return seg;
 }
@@ -172,7 +174,7 @@ static uint16_t read_ldt(void)
 {
        print_func_entry();
        uint16_t ldt;
-       asm ("sldt %0" : "=g"(ldt));
+asm("sldt %0":"=g"(ldt));
        print_func_exit();
        return ldt;
 }
@@ -180,14 +182,14 @@ static uint16_t read_ldt(void)
 static void load_fs(uint16_t sel)
 {
        print_func_entry();
-       asm ("mov %0, %%fs" : : "g"(sel));
+asm("mov %0, %%fs": :"g"(sel));
        print_func_exit();
 }
 
 static void load_gs(uint16_t sel)
 {
        print_func_entry();
-       asm ("mov %0, %%gs" : : "g"(sel));
+asm("mov %0, %%gs": :"g"(sel));
        print_func_exit();
 }
 
@@ -195,7 +197,7 @@ static void load_gs(uint16_t sel)
 static void load_ldt(uint16_t sel)
 {
        print_func_entry();
-       asm ("lldt %0" : : "g"(sel));
+asm("lldt %0": :"g"(sel));
        print_func_exit();
 }
 #endif
@@ -203,39 +205,39 @@ static void load_ldt(uint16_t sel)
 static void fx_save(void *image)
 {
        print_func_entry();
-       asm ("fxsave (%0)":: "r" (image));
+       asm("fxsave (%0)"::"r"(image));
        print_func_exit();
 }
 
 static void fx_restore(void *image)
 {
        print_func_entry();
-       asm ("fxrstor (%0)":: "r" (image));
+       asm("fxrstor (%0)"::"r"(image));
        print_func_exit();
 }
 
 static void fpu_init(void)
 {
        print_func_entry();
-       asm ("finit");
+       asm("finit");
        print_func_exit();
 }
 
 struct segment_descriptor {
        uint16_t limit_low;
        uint16_t base_low;
-       uint8_t  base_mid;
-       uint8_t  type : 4;
-       uint8_t  system : 1;
-       uint8_t  dpl : 2;
-       uint8_t  present : 1;
-       uint8_t  limit_high : 4;
-       uint8_t  avl : 1;
-       uint8_t  long_mode : 1;
-       uint8_t  default_op : 1;
-       uint8_t  granularity : 1;
-       uint8_t  base_high;
-} __attribute__((packed));
+       uint8_t base_mid;
+       uint8_t type:4;
+       uint8_t system:1;
+       uint8_t dpl:2;
+       uint8_t present:1;
+       uint8_t limit_high:4;
+       uint8_t avl:1;
+       uint8_t long_mode:1;
+       uint8_t default_op:1;
+       uint8_t granularity:1;
+       uint8_t base_high;
+} __attribute__ ((packed));
 
 #ifdef __x86_64__
 // LDT or TSS descriptor in the GDT. 16 bytes.
@@ -256,21 +258,20 @@ static unsigned long segment_base(uint16_t selector)
        typedef unsigned long ul;
        unsigned long v;
 
-       asm ("sgdt %0" : "=m"(gdt));
+asm("sgdt %0":"=m"(gdt));
        table_base = gdt.base;
 
-       if (selector & 4) {           /* from ldt */
+       if (selector & 4) {     /* from ldt */
                uint16_t ldt_selector;
 
-               asm ("sldt %0" : "=g"(ldt_selector));
+asm("sldt %0":"=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
-       v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
+       v = d->base_low | ((ul) d->base_mid << 16) | ((ul) d->base_high << 24);
 #ifdef __x86_64__
-       if (d->system == 0
-           && (d->type == 2 || d->type == 9 || d->type == 11))
-               v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
+       if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+               v |= ((ul) ((struct segment_descriptor_64 *)d)->base_higher) << 32;
 #endif
        print_func_exit();
        return v;
@@ -280,14 +281,14 @@ static unsigned long read_tr_base(void)
 {
        print_func_entry();
        uint16_t tr;
-       asm ("str %0" : "=g"(tr));
+asm("str %0":"=g"(tr));
        print_func_exit();
        return segment_base(tr);
 }
 
 static void reload_tss(void)
 {
-print_func_entry();
+       print_func_entry();
 #ifndef __x86_64__
 
        /*
@@ -298,10 +299,10 @@ print_func_entry();
 
        get_gdt(&gdt);
        descs = (void *)gdt.base;
-       descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+       descs[GDT_ENTRY_TSS].type = 9;  /* available TSS */
        load_TR_desc();
 #endif
-print_func_exit();
+       print_func_exit();
 }
 
 static struct vmcs_descriptor {
@@ -318,12 +319,8 @@ static inline struct page *_gfn_to_page(struct litevm *litevm, gfn_t gfn)
        return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : 0;
 }
 
-
-
 int litevm_read_guest(struct litevm_vcpu *vcpu,
-                            gva_t addr,
-                            unsigned long size,
-                            void *dest)
+                                         gva_t addr, unsigned long size, void *dest)
 {
        print_func_entry();
        unsigned char *host_buf = dest;
@@ -339,11 +336,11 @@ int litevm_read_guest(struct litevm_vcpu *vcpu,
 
                if (is_error_hpa(paddr))
                        break;
-               guest_buf = (hva_t)KADDR(paddr);
+               guest_buf = (hva_t) KADDR(paddr);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = MIN(size, PAGE_SIZE - offset);
-               memcpy(host_buf, (void*)guest_buf, now);
+               memcpy(host_buf, (void *)guest_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
@@ -353,9 +350,7 @@ int litevm_read_guest(struct litevm_vcpu *vcpu,
 }
 
 int litevm_write_guest(struct litevm_vcpu *vcpu,
-                            gva_t addr,
-                            unsigned long size,
-                            void *data)
+                                          gva_t addr, unsigned long size, void *data)
 {
        print_func_entry();
        unsigned char *host_buf = data;
@@ -372,11 +367,11 @@ int litevm_write_guest(struct litevm_vcpu *vcpu,
                if (is_error_hpa(paddr))
                        break;
 
-               guest_buf = (hva_t)KADDR(paddr);
+               guest_buf = (hva_t) KADDR(paddr);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = MIN(size, PAGE_SIZE - offset);
-               memcpy((void*)guest_buf, host_buf, now);
+               memcpy((void *)guest_buf, host_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
@@ -391,12 +386,12 @@ static void setup_vmcs_descriptor(void)
        uint64_t msr;
 
        msr = read_msr(MSR_IA32_VMX_BASIC_MSR);
-       vmcs_descriptor.size = (msr>>32) & 0x1fff;
-       vmcs_descriptor.order = LOG2_UP(vmcs_descriptor.size>>PAGE_SHIFT);
-       vmcs_descriptor.revision_id = (uint32_t)msr;
+       vmcs_descriptor.size = (msr >> 32) & 0x1fff;
+       vmcs_descriptor.order = LOG2_UP(vmcs_descriptor.size >> PAGE_SHIFT);
+       vmcs_descriptor.revision_id = (uint32_t) msr;
        printk("setup_vmcs_descriptor: msr 0x%x, size 0x%x order 0x%x id 0x%x\n",
-              msr, vmcs_descriptor.size, vmcs_descriptor.order,
-              vmcs_descriptor.revision_id);
+                  msr, vmcs_descriptor.size, vmcs_descriptor.order,
+                  vmcs_descriptor.revision_id);
        print_func_exit();
 };
 
@@ -406,11 +401,10 @@ static void vmcs_clear(struct vmcs *vmcs)
        uint64_t phys_addr = PADDR(vmcs);
        uint8_t error;
        printk("%d: vmcs %p phys_addr %p\n", core_id(), vmcs, (void *)phys_addr);
-       asm volatile ("vmclear %1; setna %0"
-                      : "=m"(error) : "m"(phys_addr) : "cc", "memory" );
+       asm volatile ("vmclear %1; setna %0":"=m" (error):"m"(phys_addr):"cc",
+                                 "memory");
        if (error)
-               printk("litevm: vmclear fail: %p/%llx\n",
-                      vmcs, phys_addr);
+               printk("litevm: vmclear fail: %p/%llx\n", vmcs, phys_addr);
        print_func_exit();
 }
 
@@ -419,8 +413,9 @@ static void __vcpu_clear(struct hw_trapframe *hw_tf, void *arg)
        print_func_entry();
        struct litevm_vcpu *vcpu = arg;
        int cpu = core_id();
-       printd("__vcpu_clear: cpu %d vcpu->cpu %d currentcpu->vmcs %p vcpu->vmcs %p\n", 
-              cpu, vcpu->cpu, currentcpu->vmcs, vcpu->vmcs);
+       printd
+               ("__vcpu_clear: cpu %d vcpu->cpu %d currentcpu->vmcs %p vcpu->vmcs %p\n",
+                cpu, vcpu->cpu, currentcpu->vmcs, vcpu->vmcs);
 
        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
@@ -458,13 +453,10 @@ static struct litevm_vcpu *__vcpu_load(struct litevm_vcpu *vcpu)
                uint8_t error;
 
                currentcpu->vmcs = vcpu->vmcs;
-               asm volatile ("vmptrld %1; setna %0"
-                              : "=m"(error) : "m"(phys_addr) : "cc" );
-               if (error){
-                       printk("litevm: vmptrld %p/%llx fail\n",
-                              vcpu->vmcs, phys_addr);
-                       error("litevm: vmptrld %p/%llx fail\n",
-                              vcpu->vmcs, phys_addr);
+               asm volatile ("vmptrld %1; setna %0":"=m" (error):"m"(phys_addr):"cc");
+               if (error) {
+                       printk("litevm: vmptrld %p/%llx fail\n", vcpu->vmcs, phys_addr);
+                       error("litevm: vmptrld %p/%llx fail\n", vcpu->vmcs, phys_addr);
                }
        }
 
@@ -477,12 +469,12 @@ static struct litevm_vcpu *__vcpu_load(struct litevm_vcpu *vcpu)
                 * Linux uses per-cpu TSS and GDT, so set these when switching
                 * processors.
                 */
-               vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
+               vmcs_writel(HOST_TR_BASE, read_tr_base());      /* 22.2.4 */
                get_gdt(&dt);
-               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
 
                sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
-               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+               vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp);      /* 22.2.3 */
        }
        print_func_exit();
        return vcpu;
@@ -515,7 +507,6 @@ static void vcpu_put(struct litevm_vcpu *vcpu)
        print_func_exit();
 }
 
-
 static struct vmcs *alloc_vmcs_cpu(int cpu)
 {
        print_func_entry();
@@ -528,7 +519,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
                return 0;
        }
        memset(vmcs, 0, vmcs_descriptor.size);
-       vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
+       vmcs->revision_id = vmcs_descriptor.revision_id;        /* vmcs revision id */
        print_func_exit();
        return vmcs;
 }
@@ -547,7 +538,7 @@ static int cpu_has_litevm_support(void)
        print_func_entry();
        uint32_t ecx = cpuid_ecx(1);
        print_func_exit();
-       return ecx & 5; /* CPUID.1:ECX.VMX[bit 5] -> VT */
+       return ecx & 5; /* CPUID.1:ECX.VMX[bit 5] -> VT */
 }
 
 static int vmx_disabled_by_bios(void)
@@ -557,7 +548,7 @@ static int vmx_disabled_by_bios(void)
 
        msr = read_msr(MSR_IA32_FEATURE_CONTROL);
        print_func_exit();
-       return (msr & 5) == 1; /* locked but not enabled */
+       return (msr & 5) == 1;  /* locked but not enabled */
 }
 
 static void vm_enable(struct hw_trapframe *hw_tf, void *garbage)
@@ -568,40 +559,40 @@ static void vm_enable(struct hw_trapframe *hw_tf, void *garbage)
        uint64_t old;
        uint64_t status = 0;
        currentcpu->vmxarea = get_cont_pages_node(core_id(), vmcs_descriptor.order,
-                                                 KMALLOC_WAIT);
-       if (! currentcpu->vmxarea)
+                                                                                         KMALLOC_WAIT);
+       if (!currentcpu->vmxarea)
                return;
        memset(currentcpu->vmxarea, 0, vmcs_descriptor.size);
        currentcpu->vmxarea->revision_id = vmcs_descriptor.revision_id;
        phys_addr = PADDR(currentcpu->vmxarea);
        printk("%d: currentcpu->vmxarea %p phys_addr %p\n", core_id(),
-              currentcpu->vmxarea, (void *)phys_addr);
-       if (phys_addr & 0xfff){
+                  currentcpu->vmxarea, (void *)phys_addr);
+       if (phys_addr & 0xfff) {
                printk("fix vmxarea alignment!");
        }
        printk("%d: CR4 is 0x%x, and VMXE is %x\n", core_id(), rcr4(), CR4_VMXE);
        old = read_msr(MSR_IA32_FEATURE_CONTROL);
        printk("%d: vm_enable, old is %d\n", core_id(), old);
-       if ((old & 5) == 0){
+       if ((old & 5) == 0) {
                /* enable and lock */
                write_msr(MSR_IA32_FEATURE_CONTROL, old | 5);
                old = read_msr(MSR_IA32_FEATURE_CONTROL);
                printk("%d:vm_enable, tried to set 5, old is %d\n", core_id(), old);
        }
        printk("%d:CR4 is 0x%x, and VMXE is %x\n", core_id(), rcr4(), CR4_VMXE);
-       lcr4(rcr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
+       lcr4(rcr4() | CR4_VMXE);        /* FIXME: not cpu hotplug safe */
        printk("%d:CR4 is 0x%x, and VMXE is %x\n", core_id(), rcr4(), CR4_VMXE);
        printk("%d:cr0 is %x\n", core_id(), rcr0());
        lcr0(rcr0() | 0x20);
        printk("%d:cr0 is %x\n", core_id(), rcr0());
        printk("%d:A20 is %d (0x2 should be set)\n", core_id(), inb(0x92));
-       outb(0x92, inb(0x92)|2);
+       outb(0x92, inb(0x92) | 2);
        printk("%d:A20 is %d (0x2 should be set)\n", core_id(), inb(0x92));
-       asm volatile ("vmxon %1\njbe 1f\nmovl $1, %0\n1:"       \
-                     : "=m" (status) : "m"(phys_addr) : "memory", "cc");
+       asm volatile ("vmxon %1\njbe 1f\nmovl $1, %0\n1:":"=m" (status):"m"
+                                 (phys_addr):"memory", "cc");
        printk("%d:vmxon status is %d\n", core_id(), status);
        printk("%d:CR4 is 0x%x, and VMXE is %x\n", core_id(), rcr4(), CR4_VMXE);
-       if (! status){
+       if (!status) {
                printk("%d:vm_enable: status says fail\n", core_id());
        }
        print_func_exit();
@@ -610,7 +601,7 @@ static void vm_enable(struct hw_trapframe *hw_tf, void *garbage)
 static void litevm_disable(void *garbage)
 {
        print_func_entry();
-       asm volatile ("vmxoff" : : : "cc");
+       asm volatile ("vmxoff":::"cc");
        print_func_exit();
 }
 
@@ -646,14 +637,14 @@ struct litevm *vmx_open(void)
  * Free any memory in @free but not in @dont.
  */
 static void litevm_free_physmem_slot(struct litevm_memory_slot *free,
-                                 struct litevm_memory_slot *dont)
+                                                                        struct litevm_memory_slot *dont)
 {
        print_func_entry();
        int i;
 
        if (!dont || free->phys_mem != dont->phys_mem)
                if (free->phys_mem) {
-                       for (i = 0; i < free->npages; ++i){
+                       for (i = 0; i < free->npages; ++i) {
                                page_t *page = free->phys_mem[i];
                                page_decref(page);
                                assert(page_is_free(page2ppn(page)));
@@ -713,7 +704,7 @@ static void litevm_free_vcpus(struct litevm *litevm)
 
 static int litevm_dev_release(struct litevm *litevm)
 {
-print_func_entry();
+       print_func_entry();
 
        litevm_free_vcpus(litevm);
        litevm_free_physmem(litevm);
@@ -727,7 +718,7 @@ unsigned long vmcs_readl(unsigned long field)
        print_func_entry();
        unsigned long value;
 
-       asm volatile ("vmread %1, %0" : "=g"(value) : "r"(field) : "cc");
+       asm volatile ("vmread %1, %0":"=g" (value):"r"(field):"cc");
        print_func_exit();
        return value;
 }
@@ -737,11 +728,11 @@ void vmcs_writel(unsigned long field, unsigned long value)
        print_func_entry();
        uint8_t error;
 
-       asm volatile ("vmwrite %1, %2; setna %0"
-                      : "=g"(error) : "r"(value), "r"(field) : "cc" );
+       asm volatile ("vmwrite %1, %2; setna %0":"=g" (error):"r"(value),
+                                 "r"(field):"cc");
        if (error)
                printk("vmwrite error: reg %lx value %lx (err %d)\n",
-                      field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+                          field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
        print_func_exit();
 }
 
@@ -754,28 +745,26 @@ static void vmcs_write16(unsigned long field, uint16_t value)
 
 static void vmcs_write64(unsigned long field, uint64_t value)
 {
-print_func_entry();
+       print_func_entry();
 #ifdef __x86_64__
        vmcs_writel(field, value);
 #else
        vmcs_writel(field, value);
        asm volatile ("");
-       vmcs_writel(field+1, value >> 32);
+       vmcs_writel(field + 1, value >> 32);
 #endif
-print_func_exit();
+       print_func_exit();
 }
 
 static void inject_gp(struct litevm_vcpu *vcpu)
 {
        print_func_entry();
-       printd("inject_general_protection: rip 0x%lx\n",
-              vmcs_readl(GUEST_RIP));
+       printd("inject_general_protection: rip 0x%lx\n", vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-                    GP_VECTOR |
-                    INTR_TYPE_EXCEPTION |
-                    INTR_INFO_DELIEVER_CODE_MASK |
-                    INTR_INFO_VALID_MASK);
+                                GP_VECTOR |
+                                INTR_TYPE_EXCEPTION |
+                                INTR_INFO_DELIEVER_CODE_MASK | INTR_INFO_VALID_MASK);
        print_func_exit();
 }
 
@@ -806,11 +795,11 @@ static void enter_pmode(struct litevm_vcpu *vcpu)
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
-                       (vmcs_readl(CR0_READ_SHADOW) & CR4_VME_MASK) );
+                               (vmcs_readl(CR0_READ_SHADOW) & CR4_VME_MASK));
 
        update_exception_bitmap(vcpu);
 
-       #define FIX_PMODE_DATASEG(seg, save) {                          \
+#define FIX_PMODE_DATASEG(seg, save) {                         \
                        vmcs_write16(GUEST_##seg##_SELECTOR, 0);        \
                        vmcs_writel(GUEST_##seg##_BASE, 0);             \
                        vmcs_write32(GUEST_##seg##_LIMIT, 0xffff);      \
@@ -824,15 +813,16 @@ static void enter_pmode(struct litevm_vcpu *vcpu)
        FIX_PMODE_DATASEG(FS, vcpu->rmode.fs);
 
        vmcs_write16(GUEST_CS_SELECTOR,
-                    vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
+                                vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
        print_func_exit();
 }
 
-static int rmode_tss_base(struct litevm* litevm)
+static int rmode_tss_base(struct litevm *litevm)
 {
        print_func_entry();
-       gfn_t base_gfn = litevm->memslots[0].base_gfn + litevm->memslots[0].npages - 3;
+       gfn_t base_gfn =
+               litevm->memslots[0].base_gfn + litevm->memslots[0].npages - 3;
        print_func_exit();
        return base_gfn << PAGE_SHIFT;
 }
@@ -862,7 +852,7 @@ static void enter_rmode(struct litevm_vcpu *vcpu)
        vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
        update_exception_bitmap(vcpu);
 
-       #define FIX_RMODE_SEG(seg, save) {                                 \
+#define FIX_RMODE_SEG(seg, save) {                                \
                vmcs_write16(GUEST_##seg##_SELECTOR,                       \
                                        vmcs_readl(GUEST_##seg##_BASE) >> 4); \
                vmcs_write32(GUEST_##seg##_LIMIT, 0xffff);                 \
@@ -880,7 +870,7 @@ static void enter_rmode(struct litevm_vcpu *vcpu)
        print_func_exit();
 }
 
-static int init_rmode_tss(struct litevm* litevm)
+static int init_rmode_tss(struct litevm *litevm)
 {
        print_func_entry();
        struct page *p1, *p2, *p3;
@@ -899,7 +889,7 @@ static int init_rmode_tss(struct litevm* litevm)
 
        page = page2kva(p1);
        memset(page, 0, PAGE_SIZE);
-       *(uint16_t*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
+       *(uint16_t *) (page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
 
        page = page2kva(p2);
        memset(page, 0, PAGE_SIZE);
@@ -922,14 +912,14 @@ static void __set_efer(struct litevm_vcpu *vcpu, uint64_t efer)
        vcpu->shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
-                                    vmcs_read32(VM_ENTRY_CONTROLS) |
-                                    VM_ENTRY_CONTROLS_IA32E_MASK);
+                                        vmcs_read32(VM_ENTRY_CONTROLS) |
+                                        VM_ENTRY_CONTROLS_IA32E_MASK);
                msr->data = efer;
 
        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
-                                    vmcs_read32(VM_ENTRY_CONTROLS) &
-                                    ~VM_ENTRY_CONTROLS_IA32E_MASK);
+                                        vmcs_read32(VM_ENTRY_CONTROLS) &
+                                        ~VM_ENTRY_CONTROLS_IA32E_MASK);
 
                msr->data = efer & ~EFER_LME;
        }
@@ -943,19 +933,16 @@ static void enter_lmode(struct litevm_vcpu *vcpu)
 
        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
-               printd("%s: tss fixup for long mode. \n",
-                      __FUNCTION__);
-               vmcs_write32(GUEST_TR_AR_BYTES,
-                            (guest_tr_ar & ~AR_TYPE_MASK)
-                            | AR_TYPE_BUSY_64_TSS);
+               printd("%s: tss fixup for long mode. \n", __FUNCTION__);
+               vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~AR_TYPE_MASK)
+                                        | AR_TYPE_BUSY_64_TSS);
        }
 
        vcpu->shadow_efer |= EFER_LMA;
 
        find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
-       vmcs_write32(VM_ENTRY_CONTROLS,
-                    vmcs_read32(VM_ENTRY_CONTROLS)
-                    | VM_ENTRY_CONTROLS_IA32E_MASK);
+       vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS)
+                                | VM_ENTRY_CONTROLS_IA32E_MASK);
        print_func_exit();
 }
 
@@ -964,9 +951,8 @@ static void exit_lmode(struct litevm_vcpu *vcpu)
        print_func_entry();
        vcpu->shadow_efer &= ~EFER_LMA;
 
-       vmcs_write32(VM_ENTRY_CONTROLS,
-                    vmcs_read32(VM_ENTRY_CONTROLS)
-                    & ~VM_ENTRY_CONTROLS_IA32E_MASK);
+       vmcs_write32(VM_ENTRY_CONTROLS, vmcs_read32(VM_ENTRY_CONTROLS)
+                                & ~VM_ENTRY_CONTROLS_IA32E_MASK);
        print_func_exit();
 }
 
@@ -996,11 +982,11 @@ static void __set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
 }
 
 static int pdptrs_have_reserved_bits_set(struct litevm_vcpu *vcpu,
-                                        unsigned long cr3)
+                                                                                unsigned long cr3)
 {
        print_func_entry();
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-       unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
+       unsigned offset = (cr3 & (PAGE_SIZE - 1)) >> 5;
        int i;
        uint64_t pdpte;
        uint64_t *pdpt;
@@ -1027,8 +1013,7 @@ static void set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
 {
        print_func_entry();
        if (cr0 & CR0_RESEVED_BITS) {
-               printd("set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, guest_cr0());
+               printd("set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", cr0, guest_cr0());
                inject_gp(vcpu);
                print_func_exit();
                return;
@@ -1042,8 +1027,7 @@ static void set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
        }
 
        if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
-               printd("set_cr0: #GP, set PG flag "
-                      "and a clear PE flag\n");
+               printd("set_cr0: #GP, set PG flag " "and a clear PE flag\n");
                inject_gp(vcpu);
                print_func_exit();
                return;
@@ -1055,7 +1039,7 @@ static void set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
                        uint32_t guest_cs_ar;
                        if (!is_pae()) {
                                printd("set_cr0: #GP, start paging "
-                                      "in long mode while PAE is disabled\n");
+                                          "in long mode while PAE is disabled\n");
                                inject_gp(vcpu);
                                print_func_exit();
                                return;
@@ -1063,7 +1047,7 @@ static void set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
                        guest_cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
                        if (guest_cs_ar & SEGMENT_AR_L_MASK) {
                                printd("set_cr0: #GP, start paging "
-                                      "in long mode while CS.L == 1\n");
+                                          "in long mode while CS.L == 1\n");
                                inject_gp(vcpu);
                                print_func_exit();
                                return;
@@ -1071,10 +1055,8 @@ static void set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
                        }
                } else
 #endif
-               if (is_pae() &&
-                           pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
-                       printd("set_cr0: #GP, pdptrs "
-                              "reserved bits\n");
+               if (is_pae() && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+                       printd("set_cr0: #GP, pdptrs " "reserved bits\n");
                        inject_gp(vcpu);
                        print_func_exit();
                        return;
@@ -1110,7 +1092,8 @@ static void __set_cr4(struct litevm_vcpu *vcpu, unsigned long cr4)
        print_func_entry();
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
-                   LITEVM_RMODE_VM_CR4_ALWAYS_ON : LITEVM_PMODE_VM_CR4_ALWAYS_ON));
+                                                                 LITEVM_RMODE_VM_CR4_ALWAYS_ON :
+                                                                 LITEVM_PMODE_VM_CR4_ALWAYS_ON));
        print_func_exit();
 }
 
@@ -1126,14 +1109,13 @@ static void set_cr4(struct litevm_vcpu *vcpu, unsigned long cr4)
 
        if (is_long_mode()) {
                if (!(cr4 & CR4_PAE_MASK)) {
-                       printd("set_cr4: #GP, clearing PAE while "
-                              "in long mode\n");
+                       printd("set_cr4: #GP, clearing PAE while " "in long mode\n");
                        inject_gp(vcpu);
                        print_func_exit();
                        return;
                }
        } else if (is_paging() && !is_pae() && (cr4 & CR4_PAE_MASK)
-                  && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+                          && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
                printd("set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
        }
@@ -1155,7 +1137,7 @@ static void set_cr3(struct litevm_vcpu *vcpu, unsigned long cr3)
 {
        print_func_entry();
        if (is_long_mode()) {
-               if ( cr3 & CR3_L_MODE_RESEVED_BITS) {
+               if (cr3 & CR3_L_MODE_RESEVED_BITS) {
                        printd("set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        print_func_exit();
@@ -1168,10 +1150,8 @@ static void set_cr3(struct litevm_vcpu *vcpu, unsigned long cr3)
                        print_func_exit();
                        return;
                }
-               if (is_paging() && is_pae() &&
-                   pdptrs_have_reserved_bits_set(vcpu, cr3)) {
-                       printd("set_cr3: #GP, pdptrs "
-                              "reserved bits\n");
+               if (is_paging() && is_pae() && pdptrs_have_reserved_bits_set(vcpu, cr3)) {
+                       printd("set_cr3: #GP, pdptrs " "reserved bits\n");
                        inject_gp(vcpu);
                        print_func_exit();
                        return;
@@ -1188,7 +1168,7 @@ static void set_cr3(struct litevm_vcpu *vcpu, unsigned long cr3)
 static void set_cr8(struct litevm_vcpu *vcpu, unsigned long cr8)
 {
        print_func_entry();
-       if ( cr8 & CR8_RESEVED_BITS) {
+       if (cr8 & CR8_RESEVED_BITS) {
                printd("set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                inject_gp(vcpu);
                print_func_exit();
@@ -1203,8 +1183,7 @@ static uint32_t get_rdx_init_val(void)
        print_func_entry();
        uint32_t val;
 
-       asm ("movl $1, %%eax \n\t"
-            "movl %%eax, %0 \n\t" : "=g"(val) );
+asm("movl $1, %%eax \n\t" "movl %%eax, %0 \n\t":"=g"(val));
        print_func_exit();
        return val;
 
@@ -1214,12 +1193,12 @@ static void fx_init(struct litevm_vcpu *vcpu)
 {
        print_func_entry();
        struct __attribute__ ((__packed__)) fx_image_s {
-               uint16_t control; //fcw
-               uint16_t status; //fsw
-               uint16_t tag; // ftw
-               uint16_t opcode; //fop
-               uint64_t ip; // fpu ip
-               uint64_t operand;// fpu dp
+               uint16_t control;               //fcw
+               uint16_t status;                //fsw
+               uint16_t tag;                   // ftw
+               uint16_t opcode;                //fop
+               uint64_t ip;                    // fpu ip
+               uint64_t operand;               // fpu dp
                uint32_t mxcsr;
                uint32_t mxcsr_mask;
 
@@ -1233,11 +1212,12 @@ static void fx_init(struct litevm_vcpu *vcpu)
        fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
        fx_image->mxcsr = 0x1f80;
        memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
-              0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+                  0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
        print_func_exit();
 }
 
-static void vmcs_write32_fixedbits(uint32_t msr, uint32_t vmcs_field, uint32_t val)
+static void vmcs_write32_fixedbits(uint32_t msr, uint32_t vmcs_field,
+                                                                  uint32_t val)
 {
        print_func_entry();
        uint32_t msr_high, msr_low;
@@ -1245,7 +1225,7 @@ static void vmcs_write32_fixedbits(uint32_t msr, uint32_t vmcs_field, uint32_t v
 
        msrval = read_msr(msr);
        msr_low = msrval;
-       msr_high = (msrval>>32);
+       msr_high = (msrval >> 32);
 
        val &= msr_high;
        val |= msr_low;
@@ -1258,7 +1238,7 @@ static void vmcs_write32_fixedbits(uint32_t msr, uint32_t vmcs_field, uint32_t v
  */
 static int litevm_vcpu_setup(struct litevm_vcpu *vcpu)
 {
-print_func_entry();
+       print_func_entry();
 /* no op on x86_64 */
 #define asmlinkage
        extern asmlinkage void litevm_vmx_return(void);
@@ -1271,7 +1251,6 @@ print_func_entry();
        uint64_t tsc;
        int nr_good_msrs;
 
-
        if (!init_rmode_tss(vcpu->litevm)) {
                error("vcpu_setup: init_rmode_tss failed");
        }
@@ -1280,8 +1259,8 @@ print_func_entry();
        vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vcpu->cr8 = 0;
        vcpu->apic_base = 0xfee00000 |
-                       /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
-                       MSR_IA32_APICBASE_ENABLE;
+               /*for vcpu 0 */ MSR_IA32_APICBASE_BSP |
+               MSR_IA32_APICBASE_ENABLE;
 
        fx_init(vcpu);
 
@@ -1347,76 +1326,69 @@ print_func_entry();
        tsc = read_tsc();
        vmcs_write64(TSC_OFFSET, -tsc);
 
-       vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
+       vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
 
        /* Control */
-       vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS_MSR,
-                              PIN_BASED_VM_EXEC_CONTROL,
-                              PIN_BASED_EXT_INTR_MASK   /* 20.6.1 */
-                              | PIN_BASED_NMI_EXITING   /* 20.6.1 */
-                       );
-       vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS_MSR,
-                              CPU_BASED_VM_EXEC_CONTROL,
-                              CPU_BASED_HLT_EXITING         /* 20.6.2 */
-                              | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
-                              | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
-                              | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
-                              | CPU_BASED_INVDPG_EXITING
-                              | CPU_BASED_MOV_DR_EXITING
-                              | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
-                       );
+       vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS_MSR, PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_EXT_INTR_MASK       /* 20.6.1 */
+                                                  | PIN_BASED_NMI_EXITING      /* 20.6.1 */
+               );
+       vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS_MSR, CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_HLT_EXITING        /* 20.6.2 */
+                                                  | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
+                                                  | CPU_BASED_CR8_STORE_EXITING        /* 20.6.2 */
+                                                  | CPU_BASED_UNCOND_IO_EXITING        /* 20.6.2 */
+                                                  | CPU_BASED_INVDPG_EXITING | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING  /* 21.3 */
+               );
 
        vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
-       vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
+       vmcs_write32(CR3_TARGET_COUNT, 0);      /* 22.2.1 */
 
-       vmcs_writel(HOST_CR0, rcr0());  /* 22.2.3 */
-       vmcs_writel(HOST_CR4, rcr4());  /* 22.2.3, 22.2.5 */
-       vmcs_writel(HOST_CR3, rcr3());  /* 22.2.3  FIXME: shadow tables */
+       vmcs_writel(HOST_CR0, rcr0());  /* 22.2.3 */
+       vmcs_writel(HOST_CR4, rcr4());  /* 22.2.3, 22.2.5 */
+       vmcs_writel(HOST_CR3, rcr3());  /* 22.2.3  FIXME: shadow tables */
 
 #warning "not setting selectors; do we need them?"
 #if 0
-       vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
-       vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-       vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+       vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);    /* 22.2.4 */
+       vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);    /* 22.2.4 */
+       vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);    /* 22.2.4 */
 #endif
-       vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
-       vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
+       vmcs_write16(HOST_FS_SELECTOR, read_fs());      /* 22.2.4 */
+       vmcs_write16(HOST_GS_SELECTOR, read_gs());      /* 22.2.4 */
 #if 0
-       vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+       vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);    /* 22.2.4 */
 #endif
 #ifdef __x86_64__
        a = read_msr(MSR_FS_BASE);
-       vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
+       vmcs_writel(HOST_FS_BASE, a);   /* 22.2.4 */
        a = read_msr(MSR_GS_BASE);
-       vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
+       vmcs_writel(HOST_GS_BASE, a);   /* 22.2.4 */
 #else
-       vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
-       vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
+       vmcs_writel(HOST_FS_BASE, 0);   /* 22.2.4 */
+       vmcs_writel(HOST_GS_BASE, 0);   /* 22.2.4 */
 #endif
 
 #warning "Not setting HOST_TR_SELECTOR"
 #if 0
-       vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
+       vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS * 8);      /* 22.2.4 */
 #endif
 
        get_idt(&dt);
-       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
-
+       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
 
-       vmcs_writel(HOST_RIP, (unsigned long)litevm_vmx_return); /* 22.2.5 */
+       vmcs_writel(HOST_RIP, (unsigned long)litevm_vmx_return);        /* 22.2.5 */
 
        /* it's the HIGH 32 bits! */
        host_sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS) >> 32;
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        a = read_msr(MSR_IA32_SYSENTER_ESP);
-       vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
+       vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
        a = read_msr(MSR_IA32_SYSENTER_EIP);
-       vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
+       vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
 
        ret = -ENOMEM;
        vcpu->guest_msrs = kmalloc(PAGE_SIZE, KMALLOC_WAIT);
@@ -1433,8 +1405,8 @@ print_func_entry();
                int j = vcpu->nmsrs;
 
 #warning "need readmsr_safe"
-//             if (rdmsr_safe(index, &data_low, &data_high) < 0)
-//                     continue;
+//      if (rdmsr_safe(index, &data_low, &data_high) < 0)
+//          continue;
                data = read_msr(index);
                vcpu->host_msrs[j].index = index;
                vcpu->host_msrs[j].reserved = 0;
@@ -1445,23 +1417,17 @@ print_func_entry();
        printk("msrs: %d\n", vcpu->nmsrs);
 
        nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS;
-       vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-                   PADDR(vcpu->guest_msrs + NR_BAD_MSRS));
-       vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-                   PADDR(vcpu->guest_msrs + NR_BAD_MSRS));
-       vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-                   PADDR(vcpu->host_msrs + NR_BAD_MSRS));
-       vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CONTROLS,
-                              (HOST_IS_64 << 9));  /* 22.2,1, 20.7.1 */
-       vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
-
+       vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR, PADDR(vcpu->guest_msrs + NR_BAD_MSRS));
+       vmcs_writel(VM_EXIT_MSR_STORE_ADDR, PADDR(vcpu->guest_msrs + NR_BAD_MSRS));
+       vmcs_writel(VM_EXIT_MSR_LOAD_ADDR, PADDR(vcpu->host_msrs + NR_BAD_MSRS));
+       vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CONTROLS, (HOST_IS_64 << 9));        /* 22.2,1, 20.7.1 */
+       vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs);    /* 22.2.2 */
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);     /* 22.2.2 */
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs);    /* 22.2.2 */
 
        /* 22.2.1, 20.8.1 */
-       vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS_MSR,
-                               VM_ENTRY_CONTROLS, 0);
-       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
+       vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CONTROLS, 0);
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);      /* 22.2.1 */
 
        vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
        vmcs_writel(TPR_THRESHOLD, 0);
@@ -1469,7 +1435,7 @@ print_func_entry();
        vmcs_writel(CR0_GUEST_HOST_MASK, LITEVM_GUEST_CR0_MASK);
        vmcs_writel(CR4_GUEST_HOST_MASK, LITEVM_GUEST_CR4_MASK);
 
-       __set_cr0(vcpu, 0x60000010); // enter rmode
+       __set_cr0(vcpu, 0x60000010);    // enter rmode
        __set_cr4(vcpu, 0);
 #ifdef __x86_64__
        __set_efer(vcpu, 0);
@@ -1522,9 +1488,11 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
        struct vmcs *vmcs;
        char *errstring = NULL;
 
-       if (n < 0 || n >= LITEVM_MAX_VCPUS){
-               printk("%d is out of range; LITEVM_MAX_VCPUS is %d", n, LITEVM_MAX_VCPUS);
-               error("%d is out of range; LITEVM_MAX_VCPUS is %d", n, LITEVM_MAX_VCPUS);
+       if (n < 0 || n >= LITEVM_MAX_VCPUS) {
+               printk("%d is out of range; LITEVM_MAX_VCPUS is %d", n,
+                          LITEVM_MAX_VCPUS);
+               error("%d is out of range; LITEVM_MAX_VCPUS is %d", n,
+                         LITEVM_MAX_VCPUS);
        }
 
        vcpu = &litevm->vcpus[n];
@@ -1540,14 +1508,14 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
        /* I'm a bad person */
        //ALIGN(vcpu->fx_buf, FX_IMAGE_ALIGN);
        uint64_t a = (uint64_t) vcpu->fx_buf;
-       a += FX_IMAGE_ALIGN-1;
+       a += FX_IMAGE_ALIGN - 1;
        a /= FX_IMAGE_ALIGN;
        a *= FX_IMAGE_ALIGN;
 
-       vcpu->host_fx_image = (char*)a;
+       vcpu->host_fx_image = (char *)a;
        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 
-       vcpu->cpu = -1;  /* First load will set up TR */
+       vcpu->cpu = -1; /* First load will set up TR */
        vcpu->litevm = litevm;
 
        vmcs = alloc_vmcs();
@@ -1566,8 +1534,8 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
        __vcpu_load(vcpu);
 
        printk("PAST vcpu_load\n");
-       #warning unmatched waserror!
-       if (waserror()){
+#warning unmatched waserror!
+       if (waserror()) {
                /* we really need to fix waserror() */
                poperror();
                goto out_free_vcpus;
@@ -1579,8 +1547,8 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
 
        printk("r is %d\n", r);
 
-       if (! r) {
-               
+       if (!r) {
+
                print_func_exit();
                return 0;
        }
@@ -1603,7 +1571,7 @@ out:
  * Discontiguous memory is allowed, mostly for framebuffers.
  */
 int vm_set_memory_region(struct litevm *litevm,
-                                          struct litevm_memory_region *mem)
+                                                struct litevm_memory_region *mem)
 {
        print_func_entry();
        ERRSTACK(2);
@@ -1619,7 +1587,7 @@ int vm_set_memory_region(struct litevm *litevm,
 
        printk("litevm %p\n", litevm);
        /* should not happen but ... */
-       if (! litevm)
+       if (!litevm)
                error("NULL litevm in %s", __func__);
 
        if (!mem)
@@ -1632,12 +1600,13 @@ int vm_set_memory_region(struct litevm *litevm,
        if (mem->memory_size & (PAGE_SIZE - 1))
                error("mem->memory_size %lld is not page-aligned", mem->memory_size);
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
-               error("guest_phys_addr 0x%llx is not page-aligned", mem->guest_phys_addr);
+               error("guest_phys_addr 0x%llx is not page-aligned",
+                         mem->guest_phys_addr);
        if (mem->slot >= LITEVM_MEMORY_SLOTS)
                error("Slot %d is >= %d", mem->slot, LITEVM_MEMORY_SLOTS);
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
-               error("0x%x + 0x%x is < 0x%x", 
-                     mem->guest_phys_addr, mem->memory_size, mem->guest_phys_addr);
+               error("0x%x + 0x%x is < 0x%x",
+                         mem->guest_phys_addr, mem->memory_size, mem->guest_phys_addr);
 
        memslot = &litevm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
@@ -1656,11 +1625,11 @@ raced:
        spin_lock_irqsave(&litevm->lock);
        printk("locked\n");
 
-       if (waserror()){
+       if (waserror()) {
                spin_unlock(&litevm->lock);
                nexterror();
        }
-               
+
        memory_config_version = litevm->memory_config_version;
        new = old = *memslot;
 
@@ -1672,7 +1641,7 @@ raced:
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                error("npages is %d, old.npages is %d, can't change",
-                     npages, old.npages);
+                         npages, old.npages);
 
        /* Check for overlaps */
        r = -EEXIST;
@@ -1682,7 +1651,7 @@ raced:
                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
-                     (base_gfn >= s->base_gfn + s->npages)))
+                         (base_gfn >= s->base_gfn + s->npages)))
                        error("Overlap");
        }
        /*
@@ -1715,9 +1684,9 @@ raced:
                        ret = kpage_alloc(&new.phys_mem[i]);
                        if (ret != ESUCCESS)
                                goto out_free;
-                       if (init_data){
+                       if (init_data) {
                                printk("init data memcpy(%p,%p,4096);\n",
-                                      page2kva(new.phys_mem[i]), init_data);
+                                          page2kva(new.phys_mem[i]), init_data);
                                memcpy(page2kva(new.phys_mem[i]), init_data, PAGE_SIZE);
                                init_data += PAGE_SIZE;
                        }
@@ -1726,11 +1695,13 @@ raced:
 
        /* Allocate page dirty bitmap if needed */
        if ((new.flags & LITEVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-               unsigned dirty_bytes;//ALIGN(npages, BITS_PER_LONG) / 8;
-               dirty_bytes = (((npages + BITS_PER_LONG-1)/BITS_PER_LONG)*BITS_PER_LONG)/8;
+               unsigned dirty_bytes;   //ALIGN(npages, BITS_PER_LONG) / 8;
+               dirty_bytes =
+                       (((npages + BITS_PER_LONG -
+                          1) / BITS_PER_LONG) * BITS_PER_LONG) / 8;
 
                new.dirty_bitmap = kzmalloc(dirty_bytes, KMALLOC_WAIT);
-               if (!new.dirty_bitmap){
+               if (!new.dirty_bitmap) {
                        printk("VM: alloc of %d bytes for map failed\n", dirty_bytes);
                        goto out_free;
                }
@@ -1746,7 +1717,7 @@ raced:
        }
 
        r = -EAGAIN;
-       if (litevm->busy){
+       if (litevm->busy) {
                printk("BUSY!\n");
                goto out_unlock;
        }
@@ -1790,7 +1761,7 @@ out:
  * Get (and clear) the dirty memory log for a memory slot.
  */
 static int litevm_dev_ioctl_get_dirty_log(struct litevm *litevm,
-                                      struct litevm_dirty_log *log)
+                                                                                 struct litevm_dirty_log *log)
 {
        struct litevm_memory_slot *memslot;
        int r, i;
@@ -1823,7 +1794,6 @@ static int litevm_dev_ioctl_get_dirty_log(struct litevm *litevm,
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;
 
-
        if (any) {
                spin_lock_irqsave(&litevm->lock);
                litevm_mmu_slot_remove_write_access(litevm, log->slot);
@@ -1858,7 +1828,7 @@ struct litevm_memory_slot *gfn_to_memslot(struct litevm *litevm, gfn_t gfn)
                struct litevm_memory_slot *memslot = &litevm->memslots[i];
 
                if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages) {
+                       && gfn < memslot->base_gfn + memslot->npages) {
                        print_func_exit();
                        return memslot;
                }
@@ -1878,7 +1848,7 @@ void mark_page_dirty(struct litevm *litevm, gfn_t gfn)
                memslot = &litevm->memslots[i];
 
                if (gfn >= memslot->base_gfn
-                   && gfn < memslot->base_gfn + memslot->npages) {
+                       && gfn < memslot->base_gfn + memslot->npages) {
 
                        if (!memslot || !memslot->dirty_bitmap) {
                                print_func_exit();
@@ -1913,15 +1883,13 @@ static void skip_emulated_instruction(struct litevm_vcpu *vcpu)
         */
        interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        if (interruptibility & 3)
-               vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
-                            interruptibility & ~3);
+               vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility & ~3);
        print_func_exit();
 }
 
 static int emulator_read_std(unsigned long addr,
-                            unsigned long *val,
-                            unsigned int bytes,
-                            struct x86_emulate_ctxt *ctxt)
+                                                        unsigned long *val,
+                                                        unsigned int bytes, struct x86_emulate_ctxt *ctxt)
 {
        print_func_entry();
        struct litevm_vcpu *vcpu = ctxt->vcpu;
@@ -1929,8 +1897,8 @@ static int emulator_read_std(unsigned long addr,
 
        while (bytes) {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
-               unsigned offset = addr & (PAGE_SIZE-1);
-               unsigned tocopy = bytes < (unsigned)PAGE_SIZE - offset ? 
+               unsigned offset = addr & (PAGE_SIZE - 1);
+               unsigned tocopy = bytes < (unsigned)PAGE_SIZE - offset ?
                        bytes : (unsigned)PAGE_SIZE - offset;
                unsigned long pfn;
                struct litevm_memory_slot *memslot;
@@ -1960,21 +1928,19 @@ static int emulator_read_std(unsigned long addr,
 }
 
 static int emulator_write_std(unsigned long addr,
-                             unsigned long val,
-                             unsigned int bytes,
-                             struct x86_emulate_ctxt *ctxt)
+                                                         unsigned long val,
+                                                         unsigned int bytes, struct x86_emulate_ctxt *ctxt)
 {
        print_func_entry();
-       printk("emulator_write_std: addr %lx n %d\n",
-              addr, bytes);
+       printk("emulator_write_std: addr %lx n %d\n", addr, bytes);
        print_func_exit();
        return X86EMUL_UNHANDLEABLE;
 }
 
 static int emulator_read_emulated(unsigned long addr,
-                                 unsigned long *val,
-                                 unsigned int bytes,
-                                 struct x86_emulate_ctxt *ctxt)
+                                                                 unsigned long *val,
+                                                                 unsigned int bytes,
+                                                                 struct x86_emulate_ctxt *ctxt)
 {
        print_func_entry();
        struct litevm_vcpu *vcpu = ctxt->vcpu;
@@ -1985,11 +1951,10 @@ static int emulator_read_emulated(unsigned long addr,
                print_func_exit();
                return X86EMUL_CONTINUE;
        } else if (emulator_read_std(addr, val, bytes, ctxt)
-                  == X86EMUL_CONTINUE) {
+                          == X86EMUL_CONTINUE) {
                print_func_exit();
                return X86EMUL_CONTINUE;
-       }
-       else {
+       } else {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                if (gpa == UNMAPPED_GVA) {
                        print_func_exit();
@@ -2006,9 +1971,9 @@ static int emulator_read_emulated(unsigned long addr,
 }
 
 static int emulator_write_emulated(unsigned long addr,
-                                  unsigned long val,
-                                  unsigned int bytes,
-                                  struct x86_emulate_ctxt *ctxt)
+                                                                  unsigned long val,
+                                                                  unsigned int bytes,
+                                                                  struct x86_emulate_ctxt *ctxt)
 {
        print_func_entry();
        struct litevm_vcpu *vcpu = ctxt->vcpu;
@@ -2030,10 +1995,10 @@ static int emulator_write_emulated(unsigned long addr,
 }
 
 static int emulator_cmpxchg_emulated(unsigned long addr,
-                                    unsigned long old,
-                                    unsigned long new,
-                                    unsigned int bytes,
-                                    struct x86_emulate_ctxt *ctxt)
+                                                                        unsigned long old,
+                                                                        unsigned long new,
+                                                                        unsigned int bytes,
+                                                                        struct x86_emulate_ctxt *ctxt)
 {
        print_func_entry();
        static int reported;
@@ -2062,30 +2027,29 @@ static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
        emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);
 
        printk("emulation failed but !mmio_needed?"
-              " rip %lx %02x %02x %02x %02x\n",
-              rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+                  " rip %lx %02x %02x %02x %02x\n",
+                  rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
        print_func_exit();
 }
 
 struct x86_emulate_ops emulate_ops = {
-       .read_std            = emulator_read_std,
-       .write_std           = emulator_write_std,
-       .read_emulated       = emulator_read_emulated,
-       .write_emulated      = emulator_write_emulated,
-       .cmpxchg_emulated    = emulator_cmpxchg_emulated,
+       .read_std = emulator_read_std,
+       .write_std = emulator_write_std,
+       .read_emulated = emulator_read_emulated,
+       .write_emulated = emulator_write_emulated,
+       .cmpxchg_emulated = emulator_cmpxchg_emulated,
 };
 
 enum emulation_result {
-       EMULATE_DONE,       /* no further processing */
-       EMULATE_DO_MMIO,      /* litevm_run filled with mmio request */
-       EMULATE_FAIL,         /* can't emulate this instruction */
+       EMULATE_DONE,                           /* no further processing */
+       EMULATE_DO_MMIO,                        /* litevm_run filled with mmio request */
+       EMULATE_FAIL,                           /* can't emulate this instruction */
 };
 
 static int emulate_instruction(struct litevm_vcpu *vcpu,
-                              struct litevm_run *run,
-                              unsigned long cr2,
-                              uint16_t error_code)
+                                                          struct litevm_run *run,
+                                                          unsigned long cr2, uint16_t error_code)
 {
        print_func_entry();
        struct x86_emulate_ctxt emulate_ctxt;
@@ -2101,7 +2065,7 @@ static int emulate_instruction(struct litevm_vcpu *vcpu,
        emulate_ctxt.cr2 = cr2;
        emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
                ? X86EMUL_MODE_REAL : (cs_ar & AR_L_MASK)
-               ? X86EMUL_MODE_PROT64 : (cs_ar & AR_DB_MASK)
+               ? X86EMUL_MODE_PROT64 : (cs_ar & AR_DB_MASK)
                ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
        if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
@@ -2176,7 +2140,7 @@ void realmode_lidt(struct litevm_vcpu *vcpu, uint16_t limit, unsigned long base)
 }
 
 void realmode_lmsw(struct litevm_vcpu *vcpu, unsigned long msw,
-                  unsigned long *rflags)
+                                  unsigned long *rflags)
 {
        print_func_entry();
        lmsw(vcpu, msw);
@@ -2188,51 +2152,51 @@ unsigned long realmode_get_cr(struct litevm_vcpu *vcpu, int cr)
 {
        print_func_entry();
        switch (cr) {
-       case 0:
-               print_func_exit();
-               return guest_cr0();
-       case 2:
-               print_func_exit();
-               return vcpu->cr2;
-       case 3:
-               print_func_exit();
-               return vcpu->cr3;
-       case 4:
-               print_func_exit();
-               return guest_cr4();
-       default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
-               print_func_exit();
-               return 0;
+               case 0:
+                       print_func_exit();
+                       return guest_cr0();
+               case 2:
+                       print_func_exit();
+                       return vcpu->cr2;
+               case 3:
+                       print_func_exit();
+                       return vcpu->cr3;
+               case 4:
+                       print_func_exit();
+                       return guest_cr4();
+               default:
+                       vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+                       print_func_exit();
+                       return 0;
        }
 }
 
 void realmode_set_cr(struct litevm_vcpu *vcpu, int cr, unsigned long val,
-                    unsigned long *rflags)
+                                        unsigned long *rflags)
 {
        print_func_entry();
        switch (cr) {
-       case 0:
-               set_cr0(vcpu, mk_cr_64(guest_cr0(), val));
-               *rflags = vmcs_readl(GUEST_RFLAGS);
-               break;
-       case 2:
-               vcpu->cr2 = val;
-               break;
-       case 3:
-               set_cr3(vcpu, val);
-               break;
-       case 4:
-               set_cr4(vcpu, mk_cr_64(guest_cr4(), val));
-               break;
-       default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+               case 0:
+                       set_cr0(vcpu, mk_cr_64(guest_cr0(), val));
+                       *rflags = vmcs_readl(GUEST_RFLAGS);
+                       break;
+               case 2:
+                       vcpu->cr2 = val;
+                       break;
+               case 3:
+                       set_cr3(vcpu, val);
+                       break;
+               case 4:
+                       set_cr4(vcpu, mk_cr_64(guest_cr4(), val));
+                       break;
+               default:
+                       vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
        }
        print_func_exit();
 }
 
 static int handle_rmode_exception(struct litevm_vcpu *vcpu,
-                                 int vec, uint32_t err_code)
+                                                                 int vec, uint32_t err_code)
 {
        print_func_entry();
        if (!vcpu->rmode.active) {
@@ -2249,7 +2213,8 @@ static int handle_rmode_exception(struct litevm_vcpu *vcpu,
        return 0;
 }
 
-static int handle_exception(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
+static int handle_exception(struct litevm_vcpu *vcpu,
+                                                       struct litevm_run *litevm_run)
 {
        print_func_entry();
        uint32_t intr_info, error_code;
@@ -2260,20 +2225,20 @@ static int handle_exception(struct litevm_vcpu *vcpu, struct litevm_run *litevm_
        vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
-       if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-                                               !is_page_fault(intr_info)) {
+       if ((vect_info & VECTORING_INFO_VALID_MASK) && !is_page_fault(intr_info)) {
                printk("%s: unexpected, vectoring info 0x%x "
-                      "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+                          "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
        }
 
        if (is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
-               SET_BITMASK_BIT_ATOMIC(((uint8_t *)&vcpu->irq_pending), irq);
-               SET_BITMASK_BIT_ATOMIC(((uint8_t *)&vcpu->irq_summary), irq / BITS_PER_LONG);
+               SET_BITMASK_BIT_ATOMIC(((uint8_t *) & vcpu->irq_pending), irq);
+               SET_BITMASK_BIT_ATOMIC(((uint8_t *) & vcpu->irq_summary),
+                                                          irq / BITS_PER_LONG);
        }
 
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
-               asm ("int $2");
+       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) {  /* nmi */
+               asm("int $2");
                print_func_exit();
                return 1;
        }
@@ -2295,30 +2260,31 @@ static int handle_exception(struct litevm_vcpu *vcpu, struct litevm_run *litevm_
                spin_unlock(&vcpu->litevm->lock);
 
                switch (er) {
-               case EMULATE_DONE:
-                       print_func_exit();
-                       return 1;
-               case EMULATE_DO_MMIO:
-                       ++litevm_stat.mmio_exits;
-                       litevm_run->exit_reason = LITEVM_EXIT_MMIO;
-                       print_func_exit();
-                       return 0;
-                case EMULATE_FAIL:
-                       vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
-                       break;
-               default:
-                       assert(0);
+                       case EMULATE_DONE:
+                               print_func_exit();
+                               return 1;
+                       case EMULATE_DO_MMIO:
+                               ++litevm_stat.mmio_exits;
+                               litevm_run->exit_reason = LITEVM_EXIT_MMIO;
+                               print_func_exit();
+                               return 0;
+                       case EMULATE_FAIL:
+                               vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+                               break;
+                       default:
+                               assert(0);
                }
        }
 
        if (vcpu->rmode.active &&
-           handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
-                                                               error_code)) {
-               print_func_exit();
-                   return 1;
-           }
+               handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
+                                                          error_code)) {
+               print_func_exit();
+               return 1;
+       }
 
-       if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
+       if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
+               (INTR_TYPE_EXCEPTION | 1)) {
                litevm_run->exit_reason = LITEVM_EXIT_DEBUG;
                print_func_exit();
                return 0;
@@ -2331,7 +2297,7 @@ static int handle_exception(struct litevm_vcpu *vcpu, struct litevm_run *litevm_
 }
 
 static int handle_external_interrupt(struct litevm_vcpu *vcpu,
-                                    struct litevm_run *litevm_run)
+                                                                        struct litevm_run *litevm_run)
 {
        print_func_entry();
        ++litevm_stat.irq_exits;
@@ -2339,8 +2305,7 @@ static int handle_external_interrupt(struct litevm_vcpu *vcpu,
        return 1;
 }
 
-
-static int get_io_count(struct litevm_vcpu *vcpu, uint64_t *count)
+static int get_io_count(struct litevm_vcpu *vcpu, uint64_t * count)
 {
        print_func_entry();
        uint64_t inst;
@@ -2353,33 +2318,32 @@ static int get_io_count(struct litevm_vcpu *vcpu, uint64_t *count)
        } else {
                uint32_t cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
 
-               countr_size = (cs_ar & AR_L_MASK) ? 8:
-                             (cs_ar & AR_DB_MASK) ? 4: 2;
+               countr_size = (cs_ar & AR_L_MASK) ? 8 : (cs_ar & AR_DB_MASK) ? 4 : 2;
        }
 
-       rip =  vmcs_readl(GUEST_RIP);
+       rip = vmcs_readl(GUEST_RIP);
        if (countr_size != 8)
                rip += vmcs_readl(GUEST_CS_BASE);
 
        n = litevm_read_guest(vcpu, rip, sizeof(inst), &inst);
 
        for (i = 0; i < n; i++) {
-               switch (((uint8_t*)&inst)[i]) {
-               case 0xf0:
-               case 0xf2:
-               case 0xf3:
-               case 0x2e:
-               case 0x36:
-               case 0x3e:
-               case 0x26:
-               case 0x64:
-               case 0x65:
-               case 0x66:
-                       break;
-               case 0x67:
-                       countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
-               default:
-                       goto done;
+               switch (((uint8_t *) & inst)[i]) {
+                       case 0xf0:
+                       case 0xf2:
+                       case 0xf3:
+                       case 0x2e:
+                       case 0x36:
+                       case 0x3e:
+                       case 0x26:
+                       case 0x64:
+                       case 0x65:
+                       case 0x66:
+                               break;
+                       case 0x67:
+                               countr_size = (countr_size == 2) ? 4 : (countr_size >> 1);
+                       default:
+                               goto done;
                }
        }
        print_func_exit();
@@ -2416,12 +2380,13 @@ static int handle_io(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
                }
                litevm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
        } else
-               litevm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
+               litevm_run->io.value = vcpu->regs[VCPU_REGS_RAX];       /* rax */
        print_func_exit();
        return 0;
 }
 
-static int handle_invlpg(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
+static int handle_invlpg(struct litevm_vcpu *vcpu,
+                                                struct litevm_run *litevm_run)
 {
        print_func_entry();
        uint64_t address = vmcs_read64(EXIT_QUALIFICATION);
@@ -2454,66 +2419,65 @@ static int handle_cr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
        cr = exit_qualification & 15;
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
-       case 0: /* mov to cr */
-               switch (cr) {
-               case 0:
-                       vcpu_load_rsp_rip(vcpu);
-                       set_cr0(vcpu, vcpu->regs[reg]);
-                       skip_emulated_instruction(vcpu);
-                       print_func_exit();
-                       return 1;
-               case 3:
-                       vcpu_load_rsp_rip(vcpu);
-                       set_cr3(vcpu, vcpu->regs[reg]);
-                       skip_emulated_instruction(vcpu);
-                       print_func_exit();
-                       return 1;
-               case 4:
-                       vcpu_load_rsp_rip(vcpu);
-                       set_cr4(vcpu, vcpu->regs[reg]);
-                       skip_emulated_instruction(vcpu);
-                       print_func_exit();
-                       return 1;
-               case 8:
-                       vcpu_load_rsp_rip(vcpu);
-                       set_cr8(vcpu, vcpu->regs[reg]);
-                       skip_emulated_instruction(vcpu);
-                       print_func_exit();
-                       return 1;
-               };
-               break;
-       case 1: /*mov from cr*/
-               switch (cr) {
-               case 3:
-                       vcpu_load_rsp_rip(vcpu);
-                       vcpu->regs[reg] = vcpu->cr3;
-                       vcpu_put_rsp_rip(vcpu);
-                       skip_emulated_instruction(vcpu);
-                       print_func_exit();
-                       return 1;
-               case 8:
-                       printd("handle_cr: read CR8 "
-                              "cpu erratum AA15\n");
-                       vcpu_load_rsp_rip(vcpu);
-                       vcpu->regs[reg] = vcpu->cr8;
-                       vcpu_put_rsp_rip(vcpu);
+               case 0: /* mov to cr */
+                       switch (cr) {
+                               case 0:
+                                       vcpu_load_rsp_rip(vcpu);
+                                       set_cr0(vcpu, vcpu->regs[reg]);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                               case 3:
+                                       vcpu_load_rsp_rip(vcpu);
+                                       set_cr3(vcpu, vcpu->regs[reg]);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                               case 4:
+                                       vcpu_load_rsp_rip(vcpu);
+                                       set_cr4(vcpu, vcpu->regs[reg]);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                               case 8:
+                                       vcpu_load_rsp_rip(vcpu);
+                                       set_cr8(vcpu, vcpu->regs[reg]);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                       };
+                       break;
+               case 1: /*mov from cr */
+                       switch (cr) {
+                               case 3:
+                                       vcpu_load_rsp_rip(vcpu);
+                                       vcpu->regs[reg] = vcpu->cr3;
+                                       vcpu_put_rsp_rip(vcpu);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                               case 8:
+                                       printd("handle_cr: read CR8 " "cpu erratum AA15\n");
+                                       vcpu_load_rsp_rip(vcpu);
+                                       vcpu->regs[reg] = vcpu->cr8;
+                                       vcpu_put_rsp_rip(vcpu);
+                                       skip_emulated_instruction(vcpu);
+                                       print_func_exit();
+                                       return 1;
+                       }
+                       break;
+               case 3: /* lmsw */
+                       lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+
                        skip_emulated_instruction(vcpu);
                        print_func_exit();
                        return 1;
-               }
-               break;
-       case 3: /* lmsw */
-               lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
-
-               skip_emulated_instruction(vcpu);
-               print_func_exit();
-               return 1;
-       default:
-               break;
+               default:
+                       break;
        }
        litevm_run->exit_reason = 0;
        printk("litevm: unhandled control register: op %d cr %d\n",
-              (int)(exit_qualification >> 4) & 3, cr);
+                  (int)(exit_qualification >> 4) & 3, cr);
        print_func_exit();
        return 0;
 }
@@ -2536,14 +2500,14 @@ static int handle_dr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
        if (exit_qualification & 16) {
                /* mov from dr */
                switch (dr) {
-               case 6:
-                       val = 0xffff0ff0;
-                       break;
-               case 7:
-                       val = 0x400;
-                       break;
-               default:
-                       val = 0;
+                       case 6:
+                               val = 0xffff0ff0;
+                               break;
+                       case 7:
+                               val = 0x400;
+                               break;
+                       default:
+                               val = 0;
                }
                vcpu->regs[reg] = val;
        } else {
@@ -2578,47 +2542,47 @@ static int handle_rdmsr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
        }
 
        switch (ecx) {
-       case MSR_FS_BASE:
-               data = vmcs_readl(GUEST_FS_BASE);
-               break;
-       case MSR_GS_BASE:
-               data = vmcs_readl(GUEST_GS_BASE);
-               break;
-       case MSR_IA32_SYSENTER_CS:
-               data = vmcs_read32(GUEST_SYSENTER_CS);
-               break;
-       case MSR_IA32_SYSENTER_EIP:
-               data = vmcs_read32(GUEST_SYSENTER_EIP);
-               break;
-       case MSR_IA32_SYSENTER_ESP:
-               data = vmcs_read32(GUEST_SYSENTER_ESP);
-               break;
-       case MSR_IA32_MC0_CTL:
-       case MSR_IA32_MCG_STATUS:
-       case MSR_IA32_MCG_CAP:
-       case MSR_IA32_MC0_MISC:
-       case MSR_IA32_MC0_MISC+4:
-       case MSR_IA32_MC0_MISC+8:
-       case MSR_IA32_MC0_MISC+12:
-       case MSR_IA32_MC0_MISC+16:
-       case MSR_IA32_UCODE_REV:
-               /* MTRR registers */
-       case 0xfe:
-       case 0x200 ... 0x2ff:
-               data = 0;
-               break;
-       case MSR_IA32_APICBASE:
-               data = vcpu->apic_base;
-               break;
-       default:
-               if (msr) {
-                       data = msr->data;
+               case MSR_FS_BASE:
+                       data = vmcs_readl(GUEST_FS_BASE);
                        break;
-               }
-               printk("litevm: unhandled rdmsr: %x\n", ecx);
-               inject_gp(vcpu);
-               print_func_exit();
-               return 1;
+               case MSR_GS_BASE:
+                       data = vmcs_readl(GUEST_GS_BASE);
+                       break;
+               case MSR_IA32_SYSENTER_CS:
+                       data = vmcs_read32(GUEST_SYSENTER_CS);
+                       break;
+               case MSR_IA32_SYSENTER_EIP:
+                       data = vmcs_read32(GUEST_SYSENTER_EIP);
+                       break;
+               case MSR_IA32_SYSENTER_ESP:
+                       data = vmcs_read32(GUEST_SYSENTER_ESP);
+                       break;
+               case MSR_IA32_MC0_CTL:
+               case MSR_IA32_MCG_STATUS:
+               case MSR_IA32_MCG_CAP:
+               case MSR_IA32_MC0_MISC:
+               case MSR_IA32_MC0_MISC + 4:
+               case MSR_IA32_MC0_MISC + 8:
+               case MSR_IA32_MC0_MISC + 12:
+               case MSR_IA32_MC0_MISC + 16:
+               case MSR_IA32_UCODE_REV:
+                       /* MTRR registers */
+               case 0xfe:
+               case 0x200 ... 0x2ff:
+                       data = 0;
+                       break;
+               case MSR_IA32_APICBASE:
+                       data = vcpu->apic_base;
+                       break;
+               default:
+                       if (msr) {
+                               data = msr->data;
+                               break;
+                       }
+                       printk("litevm: unhandled rdmsr: %x\n", ecx);
+                       inject_gp(vcpu);
+                       print_func_exit();
+                       return 1;
        }
 
        /* FIXME: handling of bits 32:63 of rax, rdx */
@@ -2637,8 +2601,7 @@ static void set_efer(struct litevm_vcpu *vcpu, uint64_t efer)
        struct vmx_msr_entry *msr;
 
        if (efer & EFER_RESERVED_BITS) {
-               printd("set_efer: 0x%llx #GP, reserved bits\n",
-                      efer);
+               printd("set_efer: 0x%llx #GP, reserved bits\n", efer);
                inject_gp(vcpu);
                print_func_exit();
                return;
@@ -2659,7 +2622,7 @@ static void set_efer(struct litevm_vcpu *vcpu, uint64_t efer)
        msr = find_msr_entry(vcpu, MSR_EFER);
 
        if (!(efer & EFER_LMA))
-           efer &= ~EFER_LME;
+               efer &= ~EFER_LME;
        msr->data = efer;
        skip_emulated_instruction(vcpu);
        print_func_exit();
@@ -2675,7 +2638,7 @@ static int handle_wrmsr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
        uint32_t ecx = vcpu->regs[VCPU_REGS_RCX];
        struct vmx_msr_entry *msr;
        uint64_t data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
-               | ((uint64_t)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
+               | ((uint64_t) (vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
        if (guest_cpl() != 0) {
                vcpu_printf(vcpu, "%s: not supervisor\n", __FUNCTION__);
@@ -2685,53 +2648,52 @@ static int handle_wrmsr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
        }
 
        switch (ecx) {
-       case MSR_FS_BASE:
-               vmcs_writel(GUEST_FS_BASE, data);
-               break;
-       case MSR_GS_BASE:
-               vmcs_writel(GUEST_GS_BASE, data);
-               break;
-       case MSR_IA32_SYSENTER_CS:
-               vmcs_write32(GUEST_SYSENTER_CS, data);
-               break;
-       case MSR_IA32_SYSENTER_EIP:
-               vmcs_write32(GUEST_SYSENTER_EIP, data);
-               break;
-       case MSR_IA32_SYSENTER_ESP:
-               vmcs_write32(GUEST_SYSENTER_ESP, data);
-               break;
-       case MSR_EFER:
-               set_efer(vcpu, data);
-               print_func_exit();
-               return 1;
-       case MSR_IA32_MC0_STATUS:
-               printk("%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n"
-                           , __FUNCTION__, data);
-               break;
-       case MSR_IA32_TIME_STAMP_COUNTER: {
-               uint64_t tsc;
-               
-               tsc = read_tsc();
-               vmcs_write64(TSC_OFFSET, data - tsc);
-               break;
-       }
-       case MSR_IA32_UCODE_REV:
-       case MSR_IA32_UCODE_WRITE:
-       case 0x200 ... 0x2ff: /* MTRRs */
-               break;
-       case MSR_IA32_APICBASE:
-               vcpu->apic_base = data;
-               break;
-       default:
-               msr = find_msr_entry(vcpu, ecx);
-               if (msr) {
-                       msr->data = data;
+               case MSR_FS_BASE:
+                       vmcs_writel(GUEST_FS_BASE, data);
                        break;
-               }
-               printk("litevm: unhandled wrmsr: %x\n", ecx);
-               inject_gp(vcpu);
-               print_func_exit();
-               return 1;
+               case MSR_GS_BASE:
+                       vmcs_writel(GUEST_GS_BASE, data);
+                       break;
+               case MSR_IA32_SYSENTER_CS:
+                       vmcs_write32(GUEST_SYSENTER_CS, data);
+                       break;
+               case MSR_IA32_SYSENTER_EIP:
+                       vmcs_write32(GUEST_SYSENTER_EIP, data);
+                       break;
+               case MSR_IA32_SYSENTER_ESP:
+                       vmcs_write32(GUEST_SYSENTER_ESP, data);
+                       break;
+               case MSR_EFER:
+                       set_efer(vcpu, data);
+                       print_func_exit();
+                       return 1;
+               case MSR_IA32_MC0_STATUS:
+                       printk("%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n", __FUNCTION__, data);
+                       break;
+               case MSR_IA32_TIME_STAMP_COUNTER:{
+                               uint64_t tsc;
+
+                               tsc = read_tsc();
+                               vmcs_write64(TSC_OFFSET, data - tsc);
+                               break;
+                       }
+               case MSR_IA32_UCODE_REV:
+               case MSR_IA32_UCODE_WRITE:
+               case 0x200 ... 0x2ff:   /* MTRRs */
+                       break;
+               case MSR_IA32_APICBASE:
+                       vcpu->apic_base = data;
+                       break;
+               default:
+                       msr = find_msr_entry(vcpu, ecx);
+                       if (msr) {
+                               msr->data = data;
+                               break;
+                       }
+                       printk("litevm: unhandled wrmsr: %x\n", ecx);
+                       inject_gp(vcpu);
+                       print_func_exit();
+                       return 1;
        }
        skip_emulated_instruction(vcpu);
        print_func_exit();
@@ -2739,13 +2701,13 @@ static int handle_wrmsr(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
 }
 
 static int handle_interrupt_window(struct litevm_vcpu *vcpu,
-                                  struct litevm_run *litevm_run)
+                                                                  struct litevm_run *litevm_run)
 {
        print_func_entry();
        /* Turn off interrupt window reporting. */
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                    vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                    & ~CPU_BASED_VIRTUAL_INTR_PENDING);
+                                vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
+                                & ~CPU_BASED_VIRTUAL_INTR_PENDING);
        print_func_exit();
        return 1;
 }
@@ -2769,20 +2731,19 @@ static int handle_halt(struct litevm_vcpu *vcpu, struct litevm_run *litevm_run)
  * may resume.  Otherwise they set the litevm_run parameter to indicate what needs
  * to be done to userspace and return 0.
  */
-static int (*litevm_vmx_exit_handlers[])(struct litevm_vcpu *vcpu,
-                                     struct litevm_run *litevm_run) = {
-       [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
-       [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
-       [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
-       [EXIT_REASON_INVLPG]                  = handle_invlpg,
-       [EXIT_REASON_CR_ACCESS]               = handle_cr,
-       [EXIT_REASON_DR_ACCESS]               = handle_dr,
-       [EXIT_REASON_CPUID]                   = handle_cpuid,
-       [EXIT_REASON_MSR_READ]                = handle_rdmsr,
-       [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
-       [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
-       [EXIT_REASON_HLT]                     = handle_halt,
-};
+static int (*litevm_vmx_exit_handlers[]) (struct litevm_vcpu * vcpu,
+                                                                                 struct litevm_run * litevm_run) = {
+[EXIT_REASON_EXCEPTION_NMI] = handle_exception,
+               [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
+               [EXIT_REASON_IO_INSTRUCTION] = handle_io,
+               [EXIT_REASON_INVLPG] = handle_invlpg,
+               [EXIT_REASON_CR_ACCESS] = handle_cr,
+               [EXIT_REASON_DR_ACCESS] = handle_dr,
+               [EXIT_REASON_CPUID] = handle_cpuid,
+               [EXIT_REASON_MSR_READ] = handle_rdmsr,
+               [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
+               [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
+               [EXIT_REASON_HLT] = handle_halt,};
 
 static const int litevm_vmx_max_exit_handlers =
        sizeof(litevm_vmx_exit_handlers) / sizeof(*litevm_vmx_exit_handlers);
@@ -2791,23 +2752,23 @@ static const int litevm_vmx_max_exit_handlers =
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int litevm_handle_exit(struct litevm_run *litevm_run, struct litevm_vcpu *vcpu)
+static int litevm_handle_exit(struct litevm_run *litevm_run,
+                                                         struct litevm_vcpu *vcpu)
 {
        print_func_entry();
        uint32_t vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        uint32_t exit_reason = vmcs_read32(VM_EXIT_REASON);
 
-       if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
-                               exit_reason != EXIT_REASON_EXCEPTION_NMI )
+       if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+               exit_reason != EXIT_REASON_EXCEPTION_NMI)
                printk("%s: unexpected, valid vectoring info and "
-                      "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+                          "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
        litevm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        if (exit_reason < litevm_vmx_max_exit_handlers
-           && litevm_vmx_exit_handlers[exit_reason]) {
+               && litevm_vmx_exit_handlers[exit_reason]) {
                print_func_exit();
-               return litevm_vmx_exit_handlers[exit_reason](vcpu, litevm_run);
-       }
-       else {
+               return litevm_vmx_exit_handlers[exit_reason] (vcpu, litevm_run);
+       } else {
                litevm_run->exit_reason = LITEVM_EXIT_UNKNOWN;
                litevm_run->hw.hardware_exit_reason = exit_reason;
        }
@@ -2823,42 +2784,40 @@ static void inject_rmode_irq(struct litevm_vcpu *vcpu, int irq)
        uint16_t ip;
        unsigned long flags;
        unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
-       uint16_t sp =  vmcs_readl(GUEST_RSP);
+       uint16_t sp = vmcs_readl(GUEST_RSP);
        uint32_t ss_limit = vmcs_read32(GUEST_SS_LIMIT);
 
        if (sp > ss_limit || ((sp - 6) > sp)) {
                vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
-                           __FUNCTION__,
-                           vmcs_readl(GUEST_RSP),
-                           vmcs_readl(GUEST_SS_BASE),
-                           vmcs_read32(GUEST_SS_LIMIT));
+                                       __FUNCTION__,
+                                       vmcs_readl(GUEST_RSP),
+                                       vmcs_readl(GUEST_SS_BASE), vmcs_read32(GUEST_SS_LIMIT));
                print_func_exit();
                return;
        }
 
        if (litevm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
-                                                               sizeof(ent)) {
+               sizeof(ent)) {
                //vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
                print_func_exit();
                return;
        }
 
-       flags =  vmcs_readl(GUEST_RFLAGS);
-       cs =  vmcs_readl(GUEST_CS_BASE) >> 4;
-       ip =  vmcs_readl(GUEST_RIP);
-
+       flags = vmcs_readl(GUEST_RFLAGS);
+       cs = vmcs_readl(GUEST_CS_BASE) >> 4;
+       ip = vmcs_readl(GUEST_RIP);
 
        if (litevm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
-           litevm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
-           litevm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
+               litevm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
+               litevm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
                //vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
                print_func_exit();
                return;
        }
 
        vmcs_writel(GUEST_RFLAGS, flags &
-                   ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
-       vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
+                               ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
+       vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
        vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
        vmcs_writel(GUEST_RIP, ent[0]);
        vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
@@ -2877,7 +2836,7 @@ static void litevm_do_inject_irq(struct litevm_vcpu *vcpu)
         */
        vcpu->irq_pending[word_index] &= ~(1 << bit_index);
        if (!vcpu->irq_pending[word_index])
-               vcpu->irq_summary &= ~ (1 << word_index);
+               vcpu->irq_summary &= ~(1 << word_index);
 
        if (vcpu->rmode.active) {
                inject_rmode_irq(vcpu, irq);
@@ -2885,7 +2844,7 @@ static void litevm_do_inject_irq(struct litevm_vcpu *vcpu)
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-                       irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+                                irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
        print_func_exit();
 }
 
@@ -2893,7 +2852,7 @@ static void litevm_try_inject_irq(struct litevm_vcpu *vcpu)
 {
        print_func_entry();
        if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
-           && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
+               && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
                /*
                 * Interrupts enabled, and not blocked by sti or mov ss. Good.
                 */
@@ -2903,8 +2862,8 @@ static void litevm_try_inject_irq(struct litevm_vcpu *vcpu)
                 * Interrupts blocked.  Wait for unblock.
                 */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                            vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                            | CPU_BASED_VIRTUAL_INTR_PENDING);
+                                        vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
+                                        | CPU_BASED_VIRTUAL_INTR_PENDING);
        print_func_exit();
 }
 
@@ -2961,7 +2920,7 @@ int vm_run(struct litevm *litevm, struct litevm_run *litevm_run)
 
        if (litevm_run->vcpu < 0 || litevm_run->vcpu >= LITEVM_MAX_VCPUS)
                error("vcpu is %d but must be in the range %d..%d\n",
-                     litevm_run->vcpu, LITEVM_MAX_VCPUS);
+                         litevm_run->vcpu, LITEVM_MAX_VCPUS);
 
        vcpu = vcpu_load(litevm, litevm_run->vcpu);
        if (!vcpu)
@@ -3002,7 +2961,7 @@ again:
 #endif
 
        if (vcpu->irq_summary &&
-           !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
+               !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                litevm_try_inject_irq(vcpu);
 
        if (vcpu->guest_debug.enabled)
@@ -3014,124 +2973,92 @@ again:
        save_msrs(vcpu->host_msrs, vcpu->nmsrs);
        load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
 
-       asm (
-               /* Store host registers */
-               "pushf \n\t"
+       asm(
+                  /* Store host registers */
+                  "pushf \n\t"
 #ifdef __x86_64__
-               "push %%rax; push %%rbx; push %%rdx;"
-               "push %%rsi; push %%rdi; push %%rbp;"
-               "push %%r8;  push %%r9;  push %%r10; push %%r11;"
-               "push %%r12; push %%r13; push %%r14; push %%r15;"
-               "push %%rcx \n\t"
-               "vmwrite %%rsp, %2 \n\t"
+                  "push %%rax; push %%rbx; push %%rdx;"
+                  "push %%rsi; push %%rdi; push %%rbp;"
+                  "push %%r8;  push %%r9;  push %%r10; push %%r11;"
+                  "push %%r12; push %%r13; push %%r14; push %%r15;"
+                  "push %%rcx \n\t" "vmwrite %%rsp, %2 \n\t"
 #else
-               "pusha; push %%ecx \n\t"
-               "vmwrite %%esp, %2 \n\t"
+                  "pusha; push %%ecx \n\t" "vmwrite %%esp, %2 \n\t"
 #endif
-               /* Check if vmlaunch of vmresume is needed */
-               "cmp $0, %1 \n\t"
-               /* Load guest registers.  Don't clobber flags. */
+                  /* Check if vmlaunch of vmresume is needed */
+                  "cmp $0, %1 \n\t"
+                  /* Load guest registers.  Don't clobber flags. */
 #ifdef __x86_64__
-               "mov %c[cr2](%3), %%rax \n\t"
-               "mov %%rax, %%cr2 \n\t"
-               "mov %c[rax](%3), %%rax \n\t"
-               "mov %c[rbx](%3), %%rbx \n\t"
-               "mov %c[rdx](%3), %%rdx \n\t"
-               "mov %c[rsi](%3), %%rsi \n\t"
-               "mov %c[rdi](%3), %%rdi \n\t"
-               "mov %c[rbp](%3), %%rbp \n\t"
-               "mov %c[r8](%3),  %%r8  \n\t"
-               "mov %c[r9](%3),  %%r9  \n\t"
-               "mov %c[r10](%3), %%r10 \n\t"
-               "mov %c[r11](%3), %%r11 \n\t"
-               "mov %c[r12](%3), %%r12 \n\t"
-               "mov %c[r13](%3), %%r13 \n\t"
-               "mov %c[r14](%3), %%r14 \n\t"
-               "mov %c[r15](%3), %%r15 \n\t"
-               "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
+                  "mov %c[cr2](%3), %%rax \n\t" "mov %%rax, %%cr2 \n\t" "mov %c[rax](%3), %%rax \n\t" "mov %c[rbx](%3), %%rbx \n\t" "mov %c[rdx](%3), %%rdx \n\t" "mov %c[rsi](%3), %%rsi \n\t" "mov %c[rdi](%3), %%rdi \n\t" "mov %c[rbp](%3), %%rbp \n\t" "mov %c[r8](%3),  %%r8  \n\t" "mov %c[r9](%3),  %%r9  \n\t" "mov %c[r10](%3), %%r10 \n\t" "mov %c[r11](%3), %%r11 \n\t" "mov %c[r12](%3), %%r12 \n\t" "mov %c[r13](%3), %%r13 \n\t" "mov %c[r14](%3), %%r14 \n\t" "mov %c[r15](%3), %%r15 \n\t" "mov %c[rcx](%3), %%rcx \n\t"      /* kills %3 (rcx) */
 #else
-               "mov %c[cr2](%3), %%eax \n\t"
-               "mov %%eax,   %%cr2 \n\t"
-               "mov %c[rax](%3), %%eax \n\t"
-               "mov %c[rbx](%3), %%ebx \n\t"
-               "mov %c[rdx](%3), %%edx \n\t"
-               "mov %c[rsi](%3), %%esi \n\t"
-               "mov %c[rdi](%3), %%edi \n\t"
-               "mov %c[rbp](%3), %%ebp \n\t"
-               "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
+                  "mov %c[cr2](%3), %%eax \n\t" "mov %%eax,   %%cr2 \n\t" "mov %c[rax](%3), %%eax \n\t" "mov %c[rbx](%3), %%ebx \n\t" "mov %c[rdx](%3), %%edx \n\t" "mov %c[rsi](%3), %%esi \n\t" "mov %c[rdi](%3), %%edi \n\t" "mov %c[rbp](%3), %%ebp \n\t" "mov %c[rcx](%3), %%ecx \n\t"    /* kills %3 (ecx) */
 #endif
-               /* Enter guest mode */
-               "jne launched \n\t"
-               "vmlaunch \n\t"
-               "jmp litevm_vmx_return \n\t"
-               "launched: vmresume \n\t"
-               ".globl litevm_vmx_return \n\t"
-               "litevm_vmx_return: "
-               /* Save guest registers, load host registers, keep flags */
+                  /* Enter guest mode */
+                  "jne launched \n\t"
+                  "vmlaunch \n\t"
+                  "jmp litevm_vmx_return \n\t"
+                  "launched: vmresume \n\t"
+                  ".globl litevm_vmx_return \n\t" "litevm_vmx_return: "
+                  /* Save guest registers, load host registers, keep flags */
 #ifdef __x86_64__
-               "xchg %3,     0(%%rsp) \n\t"
-               "mov %%rax, %c[rax](%3) \n\t"
-               "mov %%rbx, %c[rbx](%3) \n\t"
-               "pushq 0(%%rsp); popq %c[rcx](%3) \n\t"
-               "mov %%rdx, %c[rdx](%3) \n\t"
-               "mov %%rsi, %c[rsi](%3) \n\t"
-               "mov %%rdi, %c[rdi](%3) \n\t"
-               "mov %%rbp, %c[rbp](%3) \n\t"
-               "mov %%r8,  %c[r8](%3) \n\t"
-               "mov %%r9,  %c[r9](%3) \n\t"
-               "mov %%r10, %c[r10](%3) \n\t"
-               "mov %%r11, %c[r11](%3) \n\t"
-               "mov %%r12, %c[r12](%3) \n\t"
-               "mov %%r13, %c[r13](%3) \n\t"
-               "mov %%r14, %c[r14](%3) \n\t"
-               "mov %%r15, %c[r15](%3) \n\t"
-               "mov %%cr2, %%rax   \n\t"
-               "mov %%rax, %c[cr2](%3) \n\t"
-               "mov 0(%%rsp), %3 \n\t"
-
-               "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
-               "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
-               "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
-               "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
+                  "xchg %3,     0(%%rsp) \n\t"
+                  "mov %%rax, %c[rax](%3) \n\t"
+                  "mov %%rbx, %c[rbx](%3) \n\t"
+                  "pushq 0(%%rsp); popq %c[rcx](%3) \n\t"
+                  "mov %%rdx, %c[rdx](%3) \n\t"
+                  "mov %%rsi, %c[rsi](%3) \n\t"
+                  "mov %%rdi, %c[rdi](%3) \n\t"
+                  "mov %%rbp, %c[rbp](%3) \n\t"
+                  "mov %%r8,  %c[r8](%3) \n\t"
+                  "mov %%r9,  %c[r9](%3) \n\t"
+                  "mov %%r10, %c[r10](%3) \n\t"
+