Add the skeleton for vm support
author     Ronald G. Minnich <rminnich@google.com>
           Fri, 3 Jan 2014 22:17:38 +0000 (14:17 -0800)
committer  Barret Rhoden <brho@cs.berkeley.edu>
           Fri, 17 Jan 2014 22:35:29 +0000 (14:35 -0800)
This code is from my litevm project, which is a redo of the early
kvm stuff as driver-only. That makes it easy to pull into akaros:
there is no need to rework all of akaros to support VMs. I'm not sure
we'd ever want to go that far anyway; it opens the door to all
kinds of nasty attacks (as it has in Linux) that we don't want.

kern/drivers/dev/vm.c is an architecture-independent vm driver. Note that
the include files do not have an architecture type in their names
(different from linux), so they can be included as arch/file and the same
code will build on any arch (something we need to keep in mind). This
follows the Plan 9 model: architecture-independent code builds for any
arch because the include file names themselves are arch-independent.
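
For example, the arch-independent driver picks up the arch headers by
their generic names (these are the include lines added to vm.c below):

    #include <arch/types.h>
    #include <arch/litevm.h>
    #include <arch/emulate.h>
    #include <arch/vmdebug.h>

On x86 these resolve to kern/arch/x86/*.h; another port would provide
headers with the same names.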

Next steps are to keep bringing code over, which will live in
arch/x86.

All this is open to change once everyone is back from vacation
of course.

Suggestions on efficiently pulling in the vm image are welcome.

Signed-off-by: Ronald G. Minnich <rminnich@google.com>
kern/arch/x86/emulate.h [new file with mode: 0644]
kern/arch/x86/litevm.h [new file with mode: 0644]
kern/arch/x86/types.h
kern/arch/x86/vmdebug.h [new file with mode: 0644]
kern/arch/x86/vmx.h [new file with mode: 0644]
kern/drivers/dev/vm.c

diff --git a/kern/arch/x86/emulate.h b/kern/arch/x86/emulate.h
new file mode 100644 (file)
index 0000000..a9c2da0
--- /dev/null
@@ -0,0 +1,185 @@
+/******************************************************************************
+ * x86_emulate.h
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+ *
+ * Copyright (c) 2005 Keir Fraser
+ *
+ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+ */
+
+#ifndef __X86_EMULATE_H__
+#define __X86_EMULATE_H__
+
+struct x86_emulate_ctxt;
+
+/*
+ * x86_emulate_ops:
+ *
+ * These operations represent the instruction emulator's interface to memory.
+ * There are two categories of operation: those that act on ordinary memory
+ * regions (*_std), and those that act on memory regions known to require
+ * special treatment or emulation (*_emulated).
+ *
+ * The emulator assumes that an instruction accesses only one 'emulated memory'
+ * location, that this location is the given linear faulting address (cr2), and
+ * that this is one of the instruction's data operands. Instruction fetches and
+ * stack operations are assumed never to access emulated memory. The emulator
+ * automatically deduces which operand of a string-move operation is accessing
+ * emulated memory, and assumes that the other operand accesses normal memory.
+ *
+ * NOTES:
+ *  1. The emulator isn't very smart about emulated vs. standard memory.
+ *     'Emulated memory' access addresses should be checked for sanity.
+ *     'Normal memory' accesses may fault, and the caller must arrange to
+ *     detect and handle reentrancy into the emulator via recursive faults.
+ *     Accesses may be unaligned and may cross page boundaries.
+ *  2. If the access fails (cannot emulate, or a standard access faults) then
+ *     it is up to the memop to propagate the fault to the guest VM via
+ *     some out-of-band mechanism, unknown to the emulator. The memop signals
+ *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
+ *     then immediately bail.
+ *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
+ *     cmpxchg8b_emulated need support 8-byte accesses.
+ *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+ */
+/* Access completed successfully: continue emulation as normal. */
+#define X86EMUL_CONTINUE        0
+/* Access is unhandleable: bail from emulation and return error to caller. */
+#define X86EMUL_UNHANDLEABLE    1
+/* Terminate emulation but return success to the caller. */
+#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+struct x86_emulate_ops {
+       /*
+        * read_std: Read bytes of standard (non-emulated/special) memory.
+        *           Used for instruction fetch, stack operations, and others.
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
+        */
+       int (*read_std)(unsigned long addr,
+                       unsigned long *val,
+                       unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * write_std: Write bytes of standard (non-emulated/special) memory.
+        *            Used for stack operations, and others.
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [IN ] Value to write to memory (low-order bytes used as
+        *                required).
+        *  @bytes: [IN ] Number of bytes to write to memory.
+        */
+       int (*write_std)(unsigned long addr,
+                        unsigned long val,
+                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * read_emulated: Read bytes from emulated/special memory area.
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
+        */
+       int (*read_emulated) (unsigned long addr,
+                             unsigned long *val,
+                             unsigned int bytes,
+                             struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * write_emulated: Write bytes to emulated/special memory area.
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [IN ] Value to write to memory (low-order bytes used as
+        *                required).
+        *  @bytes: [IN ] Number of bytes to write to memory.
+        */
+       int (*write_emulated) (unsigned long addr,
+                              unsigned long val,
+                              unsigned int bytes,
+                              struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
+        *                   emulated/special memory area.
+        *  @addr:  [IN ] Linear address to access.
+        *  @old:   [IN ] Value expected to be current at @addr.
+        *  @new:   [IN ] Value to write to @addr.
+        *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
+        */
+       int (*cmpxchg_emulated) (unsigned long addr,
+                                unsigned long old,
+                                unsigned long new,
+                                unsigned int bytes,
+                                struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * cmpxchg8b_emulated: Emulate an atomic (LOCKed) CMPXCHG8B operation on an
+        *                     emulated/special memory area.
+        *  @addr:  [IN ] Linear address to access.
+        *  @old:   [IN ] Value expected to be current at @addr.
+        *  @new:   [IN ] Value to write to @addr.
+        * NOTES:
+        *  1. This function is only ever called when emulating a real CMPXCHG8B.
+        *  2. This function is *never* called on x86/64 systems.
+        *  3. Not defining this function (i.e., specifying NULL) is equivalent
+        *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
+        */
+       int (*cmpxchg8b_emulated) (unsigned long addr,
+                                  unsigned long old_lo,
+                                  unsigned long old_hi,
+                                  unsigned long new_lo,
+                                  unsigned long new_hi,
+                                  struct x86_emulate_ctxt * ctxt);
+};
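+
+/*
+ * A minimal sketch (illustrative, not part of this interface) of wiring up
+ * the callbacks; the my_* helpers are hypothetical.  Leaving
+ * cmpxchg8b_emulated NULL makes it behave as X86EMUL_UNHANDLEABLE (see its
+ * notes above):
+ *
+ *	static struct x86_emulate_ops my_ops = {
+ *		.read_std = my_read_std,
+ *		.write_std = my_write_std,
+ *		.read_emulated = my_read_emulated,
+ *		.write_emulated = my_write_emulated,
+ *		.cmpxchg_emulated = my_cmpxchg_emulated,
+ *		.cmpxchg8b_emulated = NULL,
+ *	};
+ *
+ *	if (x86_emulate_memop(&ctxt, &my_ops) < 0)
+ *		handle the failure (e.g. inject an error into the guest);
+ */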
+
+struct cpu_user_regs;
+
+struct x86_emulate_ctxt {
+       /* Register state before/after emulation. */
+       struct litevm_vcpu *vcpu;
+
+       unsigned long eflags;
+
+       /* Linear faulting address (if emulating a page-faulting instruction). */
+       unsigned long cr2;
+
+       /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+       int mode;
+
+       unsigned long cs_base;
+       unsigned long ds_base;
+       unsigned long es_base;
+       unsigned long ss_base;
+       unsigned long gs_base;
+       unsigned long fs_base;
+};
+
+/* Execution mode, passed to the emulator. */
+#define X86EMUL_MODE_REAL     0        /* Real mode.             */
+#define X86EMUL_MODE_PROT16   2        /* 16-bit protected mode. */
+#define X86EMUL_MODE_PROT32   4        /* 32-bit protected mode. */
+#define X86EMUL_MODE_PROT64   8        /* 64-bit (long) mode.    */
+
+/* Host execution mode. */
+#if defined(__i386__)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+#elif defined(__x86_64__)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+#endif
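+
+/* For example (illustrative): a caller emulating on behalf of a 32-bit
+ * protected-mode guest would set ctxt->mode = X86EMUL_MODE_PROT32 before
+ * calling x86_emulate_memop(). */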
+
+/*
+ * x86_emulate_memop: Emulate an instruction that faulted attempting to
+ *                    read/write a 'special' memory area.
+ * Returns -1 on failure, 0 on success.
+ */
+int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
+                     struct x86_emulate_ops *ops);
+
+/*
+ * Given the 'reg' portion of a ModRM byte, and a register block, return a
+ * pointer into the block that addresses the relevant register.
+ * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
+ */
+void *decode_register(uint8_t modrm_reg, unsigned long *regs,
+                     int highbyte_regs);
+
+#endif                         /* __X86_EMULATE_H__ */
diff --git a/kern/arch/x86/litevm.h b/kern/arch/x86/litevm.h
new file mode 100644 (file)
index 0000000..a938da1
--- /dev/null
@@ -0,0 +1,389 @@
+#ifndef __LITEVM_H
+#define __LITEVM_H
+#include <sys/queue.h>
+#include "vmx.h"
+
+#define CR0_PE_MASK (1ULL << 0)
+#define CR0_TS_MASK (1ULL << 3)
+#define CR0_NE_MASK (1ULL << 5)
+#define CR0_WP_MASK (1ULL << 16)
+#define CR0_NW_MASK (1ULL << 29)
+#define CR0_CD_MASK (1ULL << 30)
+#define CR0_PG_MASK (1ULL << 31)
+
+#define CR3_WPT_MASK (1ULL << 3)
+#define CR3_PCD_MASK (1ULL << 4)
+
+#define CR3_RESEVED_BITS 0x07ULL
+#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
+#define CR3_FLAGS_MASK ((1ULL << 5) - 1)
+
+#define CR4_VME_MASK (1ULL << 0)
+#define CR4_PSE_MASK (1ULL << 4)
+#define CR4_PAE_MASK (1ULL << 5)
+#define CR4_PGE_MASK (1ULL << 7)
+#define CR4_VMXE_MASK (1ULL << 13)
+
+#define LITEVM_GUEST_CR0_MASK \
+       (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
+#define LITEVM_VM_CR0_ALWAYS_ON LITEVM_GUEST_CR0_MASK
+
+#define LITEVM_GUEST_CR4_MASK \
+       (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
+#define LITEVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
+#define LITEVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)
+
+#define INVALID_PAGE (~(hpa_t)0)
+#define UNMAPPED_GVA (~(gpa_t)0)
+
+#define LITEVM_MAX_VCPUS 1
+#define LITEVM_MEMORY_SLOTS 4
+#define LITEVM_NUM_MMU_PAGES 256
+
+#define FX_IMAGE_SIZE 512
+#define FX_IMAGE_ALIGN 16
+#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
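+/* Sizing note (illustrative): the one buffer holds both the host and guest
+ * FX save images, and the extra FX_IMAGE_ALIGN bytes let the first image be
+ * rounded up to a 16-byte boundary; host_fx_image and guest_fx_image in
+ * struct litevm_vcpu point into fx_buf. */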
+
+#define DE_VECTOR 0
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+
+#define SELECTOR_TI_MASK (1 << 2)
+#define SELECTOR_RPL_MASK 0x03
+
+#define IOPL_SHIFT 12
+
+/*
+ * Address types:
+ *
+ *  gva - guest virtual address
+ *  gpa - guest physical address
+ *  gfn - guest frame number
+ *  hva - host virtual address
+ *  hpa - host physical address
+ *  hfn - host frame number
+ */
+
+typedef unsigned long  gva_t;
+typedef uint64_t            gpa_t;
+typedef unsigned long  gfn_t;
+
+typedef unsigned long  hva_t;
+typedef uint64_t            hpa_t;
+typedef unsigned long  hfn_t;
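+
+/* The frame-number types relate to the byte-address types by the page size:
+ * gfn = gpa >> PAGE_SHIFT and gpa = (gpa_t)gfn << PAGE_SHIFT, and likewise
+ * for hfn and hpa. */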
+
+struct litevm_mmu_page {
+       LIST_ENTRY(litevm_mmu_page) link;
+       hpa_t page_hpa;
+       unsigned long slot_bitmap; /* One bit set per slot which has memory
+                                   * in this shadow page.
+                                   */
+       int global;              /* Set if all ptes in this page are global */
+       uint64_t *parent_pte;
+};
+
+struct vmcs {
+       uint32_t revision_id;
+       uint32_t abort;
+       char data[0];
+};
+
+struct vmx_msr_entry {
+       uint32_t index;
+       uint32_t reserved;
+       uint64_t data;
+};
+
+struct litevm_vcpu;
+
+/*
+ * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
+ * 32-bit).  The litevm_mmu structure abstracts the details of the current mmu
+ * mode.
+ */
+struct litevm_mmu {
+       void (*new_cr3)(struct litevm_vcpu *vcpu);
+       int (*page_fault)(struct litevm_vcpu *vcpu, gva_t gva, uint32_t err);
+       void (*inval_page)(struct litevm_vcpu *vcpu, gva_t gva);
+       void (*free)(struct litevm_vcpu *vcpu);
+       gpa_t (*gva_to_gpa)(struct litevm_vcpu *vcpu, gva_t gva);
+       hpa_t root_hpa;
+       int root_level;
+       int shadow_root_level;
+};
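+
+/* Illustrative use (a sketch, not a fixed calling convention): a page-fault
+ * path can translate a faulting guest virtual address with
+ *
+ *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+ *	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+ *
+ * and check is_error_hpa(hpa) before touching the host page; both helpers
+ * are declared below. */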
+
+struct litevm_guest_debug {
+       int enabled;
+       unsigned long bp[4];
+       int singlestep;
+};
+
+enum {
+       VCPU_REGS_RAX = 0,
+       VCPU_REGS_RCX = 1,
+       VCPU_REGS_RDX = 2,
+       VCPU_REGS_RBX = 3,
+       VCPU_REGS_RSP = 4,
+       VCPU_REGS_RBP = 5,
+       VCPU_REGS_RSI = 6,
+       VCPU_REGS_RDI = 7,
+#ifdef __x86_64__
+       VCPU_REGS_R8 = 8,
+       VCPU_REGS_R9 = 9,
+       VCPU_REGS_R10 = 10,
+       VCPU_REGS_R11 = 11,
+       VCPU_REGS_R12 = 12,
+       VCPU_REGS_R13 = 13,
+       VCPU_REGS_R14 = 14,
+       VCPU_REGS_R15 = 15,
+#endif
+       NR_VCPU_REGS
+};
+
+struct litevm_vcpu {
+       struct litevm *litevm;
+       struct vmcs *vmcs;
+       qlock_t mutex; //struct mutex mutex;
+       int   cpu;
+       int   launched;
+       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
+#define NR_IRQ_WORDS (256 / BITS_PER_LONG)
+       unsigned long irq_pending[NR_IRQ_WORDS];
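+       /* e.g. (illustrative): queueing interrupt vector v would set bit
+        * (v % BITS_PER_LONG) of irq_pending[v / BITS_PER_LONG] and bit
+        * (v / BITS_PER_LONG) of irq_summary. */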
+       unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+       unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+
+       unsigned long cr2;
+       unsigned long cr3;
+       unsigned long cr8;
+       uint64_t shadow_efer;
+       uint64_t apic_base;
+       int nmsrs;
+       struct vmx_msr_entry *guest_msrs;
+       struct vmx_msr_entry *host_msrs;
+       LIST_HEAD(free_pages, litevm_mmu_page) free_pages;
+       //struct list_head free_pages;
+       struct litevm_mmu_page page_header_buf[LITEVM_NUM_MMU_PAGES];
+       struct litevm_mmu mmu;
+
+       struct litevm_guest_debug guest_debug;
+
+       char fx_buf[FX_BUF_SIZE];
+       char *host_fx_image;
+       char *guest_fx_image;
+
+       int mmio_needed;
+       int mmio_read_completed;
+       int mmio_is_write;
+       int mmio_size;
+       unsigned char mmio_data[8];
+       gpa_t mmio_phys_addr;
+
+       struct {
+               int active;
+               uint8_t save_iopl;
+               struct {
+                       unsigned long base;
+                       uint32_t limit;
+                       uint32_t ar;
+               } tr;
+       } rmode;
+};
+
+struct litevm_memory_slot {
+       gfn_t base_gfn;
+       unsigned long npages;
+       unsigned long flags;
+       struct page **phys_mem;
+       unsigned long *dirty_bitmap;
+};
+
+struct litevm {
+       spinlock_t lock; /* protects everything except vcpus */
+       int nmemslots;
+       struct litevm_memory_slot memslots[LITEVM_MEMORY_SLOTS];
+       LIST_HEAD(active_mmu_pages, litevm_mmu_page) active_mmu_pages;
+       //struct list_head active_mmu_pages;
+       struct litevm_vcpu vcpus[LITEVM_MAX_VCPUS];
+       int memory_config_version;
+       int busy;
+};
+
+struct litevm_stat {
+       uint32_t pf_fixed;
+       uint32_t pf_guest;
+       uint32_t tlb_flush;
+       uint32_t invlpg;
+
+       uint32_t exits;
+       uint32_t io_exits;
+       uint32_t mmio_exits;
+       uint32_t signal_exits;
+       uint32_t irq_exits;
+};
+
+extern struct litevm_stat litevm_stat;
+
+#define litevm_printf(litevm, fmt ...) printk(KERN_DEBUG fmt)
+#define vcpu_printf(vcpu, fmt...) litevm_printf(vcpu->litevm, fmt)
+
+void litevm_mmu_destroy(struct litevm_vcpu *vcpu);
+int litevm_mmu_init(struct litevm_vcpu *vcpu);
+
+int litevm_mmu_reset_context(struct litevm_vcpu *vcpu);
+void litevm_mmu_slot_remove_write_access(struct litevm *litevm, int slot);
+
+hpa_t gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa);
+#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
+#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
+static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
+hpa_t gva_to_hpa(struct litevm_vcpu *vcpu, gva_t gva);
+
+extern hpa_t bad_page_address;
+
+static inline struct page *gfn_to_page(struct litevm_memory_slot *slot, gfn_t gfn)
+{
+       return slot->phys_mem[gfn - slot->base_gfn];
+}
+
+struct litevm_memory_slot *gfn_to_memslot(struct litevm *litevm, gfn_t gfn);
+void mark_page_dirty(struct litevm *litevm, gfn_t gfn);
+
+void realmode_lgdt(struct litevm_vcpu *vcpu, uint16_t size, unsigned long address);
+void realmode_lidt(struct litevm_vcpu *vcpu, uint16_t size, unsigned long address);
+void realmode_lmsw(struct litevm_vcpu *vcpu, unsigned long msw,
+                  unsigned long *rflags);
+
+unsigned long realmode_get_cr(struct litevm_vcpu *vcpu, int cr);
+void realmode_set_cr(struct litevm_vcpu *vcpu, int cr, unsigned long value,
+                    unsigned long *rflags);
+
+int litevm_read_guest(struct litevm_vcpu *vcpu,
+              gva_t addr,
+              unsigned long size,
+              void *dest);
+
+int litevm_write_guest(struct litevm_vcpu *vcpu,
+               gva_t addr,
+               unsigned long size,
+               void *data);
+
+void vmcs_writel(unsigned long field, unsigned long value);
+unsigned long vmcs_readl(unsigned long field);
+
+static inline uint16_t vmcs_read16(unsigned long field)
+{
+       return vmcs_readl(field);
+}
+
+static inline uint32_t vmcs_read32(unsigned long field)
+{
+       return vmcs_readl(field);
+}
+
+static inline uint64_t vmcs_read64(unsigned long field)
+{
+#ifdef __x86_64__
+       return vmcs_readl(field);
+#else
+       return vmcs_readl(field) | ((uint64_t)vmcs_readl(field+1) << 32);
+#endif
+}
+
+static inline void vmcs_write32(unsigned long field, uint32_t value)
+{
+       vmcs_writel(field, value);
+}
+
+static inline int is_long_mode(void)
+{
+       return vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_CONTROLS_IA32E_MASK;
+}
+
+static inline unsigned long guest_cr4(void)
+{
+       return (vmcs_readl(CR4_READ_SHADOW) & LITEVM_GUEST_CR4_MASK) |
+               (vmcs_readl(GUEST_CR4) & ~LITEVM_GUEST_CR4_MASK);
+}
+
+static inline int is_pae(void)
+{
+       return guest_cr4() & CR4_PAE_MASK;
+}
+
+static inline int is_pse(void)
+{
+       return guest_cr4() & CR4_PSE_MASK;
+}
+
+static inline unsigned long guest_cr0(void)
+{
+       return (vmcs_readl(CR0_READ_SHADOW) & LITEVM_GUEST_CR0_MASK) |
+               (vmcs_readl(GUEST_CR0) & ~LITEVM_GUEST_CR0_MASK);
+}
+
+static inline unsigned guest_cpl(void)
+{
+       return vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK;
+}
+
+static inline int is_paging(void)
+{
+       return guest_cr0() & CR0_PG_MASK;
+}
+
+static inline int is_page_fault(uint32_t intr_info)
+{
+       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+                            INTR_INFO_VALID_MASK)) ==
+               (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+}
+
+static inline int is_external_interrupt(uint32_t intr_info)
+{
+       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+               == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+}
+
+static inline void flush_guest_tlb(struct litevm_vcpu *vcpu)
+{
+       vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
+}
+
+static inline int memslot_id(struct litevm *litevm, struct litevm_memory_slot *slot)
+{
+       return slot - litevm->memslots;
+}
+
+/* uh, what? */
+#warning "pfn_to_page is bogus"
+static inline uintptr_t pfn_to_page(unsigned long pfn)
+{
+       return pfn << PAGE_SHIFT;
+}
+static inline struct litevm_mmu_page *page_header(hpa_t shadow_page)
+{
+       struct page *page = (struct page *)pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+       return (struct litevm_mmu_page *)page->pg_private;
+}
+
+#ifdef __x86_64__
+
+/*
+ * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.  Therefore
+ * we need to allocate shadow page tables in the first 4GB of memory, which
+ * happens to fit the DMA32 zone.
+ */
+#define GFP_LITEVM_MMU (GFP_KERNEL | __GFP_DMA32)
+
+#else
+
+#define GFP_LITEVM_MMU GFP_KERNEL
+
+#endif
+
+#endif
diff --git a/kern/arch/x86/types.h b/kern/arch/x86/types.h
index ad45d84..50181ff 100644 (file)
@@ -29,15 +29,19 @@ typedef int gid_t;
 #ifdef CONFIG_X86_64
 
 #define NUM_ADDR_BITS 64
+#define BITS_PER_LONG 64
 #define MAX_VADDR     ((uint64_t)(~0) >> (64-NUM_ADDR_BITS))
 typedef uint64_t uintptr_t;
+#define PAGE_SHIFT 12
 
 #else /* 32 bit */
 
 #define NUM_ADDR_BITS 32
+#define BITS_PER_LONG 32
 #define MAX_VADDR     ((uint64_t)(~0) >> (64-NUM_ADDR_BITS))
 typedef uint32_t uintptr_t;
 
+#define PAGE_SHIFT 12
 #endif /* 64bit / 32bit */
 
 
diff --git a/kern/arch/x86/vmdebug.h b/kern/arch/x86/vmdebug.h
new file mode 100644 (file)
index 0000000..30b6548
--- /dev/null
@@ -0,0 +1,185 @@
+/******************************************************************************
+ * x86_emulate.h
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+ *
+ * Copyright (c) 2005 Keir Fraser
+ *
+ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+ */
+
+#ifndef __X86_EMULATE_H__
+#define __X86_EMULATE_H__
+
+struct x86_emulate_ctxt;
+
+/*
+ * x86_emulate_ops:
+ *
+ * These operations represent the instruction emulator's interface to memory.
+ * There are two categories of operation: those that act on ordinary memory
+ * regions (*_std), and those that act on memory regions known to require
+ * special treatment or emulation (*_emulated).
+ *
+ * The emulator assumes that an instruction accesses only one 'emulated memory'
+ * location, that this location is the given linear faulting address (cr2), and
+ * that this is one of the instruction's data operands. Instruction fetches and
+ * stack operations are assumed never to access emulated memory. The emulator
+ * automatically deduces which operand of a string-move operation is accessing
+ * emulated memory, and assumes that the other operand accesses normal memory.
+ *
+ * NOTES:
+ *  1. The emulator isn't very smart about emulated vs. standard memory.
+ *     'Emulated memory' access addresses should be checked for sanity.
+ *     'Normal memory' accesses may fault, and the caller must arrange to
+ *     detect and handle reentrancy into the emulator via recursive faults.
+ *     Accesses may be unaligned and may cross page boundaries.
+ *  2. If the access fails (cannot emulate, or a standard access faults) then
+ *     it is up to the memop to propagate the fault to the guest VM via
+ *     some out-of-band mechanism, unknown to the emulator. The memop signals
+ *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
+ *     then immediately bail.
+ *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
+ *     cmpxchg8b_emulated need support 8-byte accesses.
+ *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+ */
+/* Access completed successfully: continue emulation as normal. */
+#define X86EMUL_CONTINUE        0
+/* Access is unhandleable: bail from emulation and return error to caller. */
+#define X86EMUL_UNHANDLEABLE    1
+/* Terminate emulation but return success to the caller. */
+#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
+struct x86_emulate_ops {
+       /*
+        * read_std: Read bytes of standard (non-emulated/special) memory.
+        *           Used for instruction fetch, stack operations, and others.
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
+        */
+       int (*read_std)(unsigned long addr,
+                       unsigned long *val,
+                       unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * write_std: Write bytes of standard (non-emulated/special) memory.
+        *            Used for stack operations, and others.
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [IN ] Value to write to memory (low-order bytes used as
+        *                required).
+        *  @bytes: [IN ] Number of bytes to write to memory.
+        */
+       int (*write_std)(unsigned long addr,
+                        unsigned long val,
+                        unsigned int bytes, struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * read_emulated: Read bytes from emulated/special memory area.
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
+        */
+       int (*read_emulated) (unsigned long addr,
+                             unsigned long *val,
+                             unsigned int bytes,
+                             struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * write_emulated: Write bytes to emulated/special memory area.
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [IN ] Value to write to memory (low-order bytes used as
+        *                required).
+        *  @bytes: [IN ] Number of bytes to write to memory.
+        */
+       int (*write_emulated) (unsigned long addr,
+                              unsigned long val,
+                              unsigned int bytes,
+                              struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
+        *                   emulated/special memory area.
+        *  @addr:  [IN ] Linear address to access.
+        *  @old:   [IN ] Value expected to be current at @addr.
+        *  @new:   [IN ] Value to write to @addr.
+        *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
+        */
+       int (*cmpxchg_emulated) (unsigned long addr,
+                                unsigned long old,
+                                unsigned long new,
+                                unsigned int bytes,
+                                struct x86_emulate_ctxt * ctxt);
+
+       /*
+        * cmpxchg8b_emulated: Emulate an atomic (LOCKed) CMPXCHG8B operation on an
+        *                     emulated/special memory area.
+        *  @addr:  [IN ] Linear address to access.
+        *  @old:   [IN ] Value expected to be current at @addr.
+        *  @new:   [IN ] Value to write to @addr.
+        * NOTES:
+        *  1. This function is only ever called when emulating a real CMPXCHG8B.
+        *  2. This function is *never* called on x86/64 systems.
+        *  3. Not defining this function (i.e., specifying NULL) is equivalent
+        *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
+        */
+       int (*cmpxchg8b_emulated) (unsigned long addr,
+                                  unsigned long old_lo,
+                                  unsigned long old_hi,
+                                  unsigned long new_lo,
+                                  unsigned long new_hi,
+                                  struct x86_emulate_ctxt * ctxt);
+};
+
+struct cpu_user_regs;
+
+struct x86_emulate_ctxt {
+       /* Register state before/after emulation. */
+       struct litevm_vcpu *vcpu;
+
+       unsigned long eflags;
+
+       /* Linear faulting address (if emulating a page-faulting instruction). */
+       unsigned long cr2;
+
+       /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+       int mode;
+
+       unsigned long cs_base;
+       unsigned long ds_base;
+       unsigned long es_base;
+       unsigned long ss_base;
+       unsigned long gs_base;
+       unsigned long fs_base;
+};
+
+/* Execution mode, passed to the emulator. */
+#define X86EMUL_MODE_REAL     0        /* Real mode.             */
+#define X86EMUL_MODE_PROT16   2        /* 16-bit protected mode. */
+#define X86EMUL_MODE_PROT32   4        /* 32-bit protected mode. */
+#define X86EMUL_MODE_PROT64   8        /* 64-bit (long) mode.    */
+
+/* Host execution mode. */
+#if defined(__i386__)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+#elif defined(__x86_64__)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+#endif
+
+/*
+ * x86_emulate_memop: Emulate an instruction that faulted attempting to
+ *                    read/write a 'special' memory area.
+ * Returns -1 on failure, 0 on success.
+ */
+int x86_emulate_memop(struct x86_emulate_ctxt *ctxt,
+                     struct x86_emulate_ops *ops);
+
+/*
+ * Given the 'reg' portion of a ModRM byte, and a register block, return a
+ * pointer into the block that addresses the relevant register.
+ * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
+ */
+void *decode_register(uint8_t modrm_reg, unsigned long *regs,
+                     int highbyte_regs);
+
+#endif                         /* __X86_EMULATE_H__ */
diff --git a/kern/arch/x86/vmx.h b/kern/arch/x86/vmx.h
new file mode 100644 (file)
index 0000000..40f2c95
--- /dev/null
@@ -0,0 +1,287 @@
+#ifndef VMX_H
+#define VMX_H
+
+/*
+ * vmx.h: VMX Architecture related definitions
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * A few random additions are:
+ * Copyright (C) 2006 Qumranet
+ *    Avi Kivity <avi@qumranet.com>
+ *    Yaniv Kamay <yaniv@qumranet.com>
+ *
+ */
+
+#define CPU_BASED_VIRTUAL_INTR_PENDING  0x00000004
+#define CPU_BASED_USE_TSC_OFFSETING     0x00000008
+#define CPU_BASED_HLT_EXITING           0x00000080
+#define CPU_BASED_INVDPG_EXITING        0x00000200
+#define CPU_BASED_MWAIT_EXITING         0x00000400
+#define CPU_BASED_RDPMC_EXITING         0x00000800
+#define CPU_BASED_RDTSC_EXITING         0x00001000
+#define CPU_BASED_CR8_LOAD_EXITING      0x00080000
+#define CPU_BASED_CR8_STORE_EXITING     0x00100000
+#define CPU_BASED_TPR_SHADOW            0x00200000
+#define CPU_BASED_MOV_DR_EXITING        0x00800000
+#define CPU_BASED_UNCOND_IO_EXITING     0x01000000
+#define CPU_BASED_ACTIVATE_IO_BITMAP    0x02000000
+#define CPU_BASED_MSR_BITMAPS           0x10000000
+#define CPU_BASED_MONITOR_EXITING       0x20000000
+#define CPU_BASED_PAUSE_EXITING         0x40000000
+
+#define PIN_BASED_EXT_INTR_MASK 0x1
+#define PIN_BASED_NMI_EXITING   0x8
+
+#define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
+#define VM_EXIT_HOST_ADD_SPACE_SIZE     0x00000200
+
+
+/* VMCS Encodings */
+enum vmcs_field {
+       GUEST_ES_SELECTOR               = 0x00000800,
+       GUEST_CS_SELECTOR               = 0x00000802,
+       GUEST_SS_SELECTOR               = 0x00000804,
+       GUEST_DS_SELECTOR               = 0x00000806,
+       GUEST_FS_SELECTOR               = 0x00000808,
+       GUEST_GS_SELECTOR               = 0x0000080a,
+       GUEST_LDTR_SELECTOR             = 0x0000080c,
+       GUEST_TR_SELECTOR               = 0x0000080e,
+       HOST_ES_SELECTOR                = 0x00000c00,
+       HOST_CS_SELECTOR                = 0x00000c02,
+       HOST_SS_SELECTOR                = 0x00000c04,
+       HOST_DS_SELECTOR                = 0x00000c06,
+       HOST_FS_SELECTOR                = 0x00000c08,
+       HOST_GS_SELECTOR                = 0x00000c0a,
+       HOST_TR_SELECTOR                = 0x00000c0c,
+       IO_BITMAP_A                     = 0x00002000,
+       IO_BITMAP_A_HIGH                = 0x00002001,
+       IO_BITMAP_B                     = 0x00002002,
+       IO_BITMAP_B_HIGH                = 0x00002003,
+       MSR_BITMAP                      = 0x00002004,
+       MSR_BITMAP_HIGH                 = 0x00002005,
+       VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
+       VM_EXIT_MSR_STORE_ADDR_HIGH     = 0x00002007,
+       VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
+       VM_EXIT_MSR_LOAD_ADDR_HIGH      = 0x00002009,
+       VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
+       VM_ENTRY_MSR_LOAD_ADDR_HIGH     = 0x0000200b,
+       TSC_OFFSET                      = 0x00002010,
+       TSC_OFFSET_HIGH                 = 0x00002011,
+       VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
+       VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
+       VMCS_LINK_POINTER               = 0x00002800,
+       VMCS_LINK_POINTER_HIGH          = 0x00002801,
+       GUEST_IA32_DEBUGCTL             = 0x00002802,
+       GUEST_IA32_DEBUGCTL_HIGH        = 0x00002803,
+       PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
+       CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
+       EXCEPTION_BITMAP                = 0x00004004,
+       PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
+       PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
+       CR3_TARGET_COUNT                = 0x0000400a,
+       VM_EXIT_CONTROLS                = 0x0000400c,
+       VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
+       VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
+       VM_ENTRY_CONTROLS               = 0x00004012,
+       VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
+       VM_ENTRY_INTR_INFO_FIELD        = 0x00004016,
+       VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
+       VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
+       TPR_THRESHOLD                   = 0x0000401c,
+       SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
+       VM_INSTRUCTION_ERROR            = 0x00004400,
+       VM_EXIT_REASON                  = 0x00004402,
+       VM_EXIT_INTR_INFO               = 0x00004404,
+       VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
+       IDT_VECTORING_INFO_FIELD        = 0x00004408,
+       IDT_VECTORING_ERROR_CODE        = 0x0000440a,
+       VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
+       VMX_INSTRUCTION_INFO            = 0x0000440e,
+       GUEST_ES_LIMIT                  = 0x00004800,
+       GUEST_CS_LIMIT                  = 0x00004802,
+       GUEST_SS_LIMIT                  = 0x00004804,
+       GUEST_DS_LIMIT                  = 0x00004806,
+       GUEST_FS_LIMIT                  = 0x00004808,
+       GUEST_GS_LIMIT                  = 0x0000480a,
+       GUEST_LDTR_LIMIT                = 0x0000480c,
+       GUEST_TR_LIMIT                  = 0x0000480e,
+       GUEST_GDTR_LIMIT                = 0x00004810,
+       GUEST_IDTR_LIMIT                = 0x00004812,
+       GUEST_ES_AR_BYTES               = 0x00004814,
+       GUEST_CS_AR_BYTES               = 0x00004816,
+       GUEST_SS_AR_BYTES               = 0x00004818,
+       GUEST_DS_AR_BYTES               = 0x0000481a,
+       GUEST_FS_AR_BYTES               = 0x0000481c,
+       GUEST_GS_AR_BYTES               = 0x0000481e,
+       GUEST_LDTR_AR_BYTES             = 0x00004820,
+       GUEST_TR_AR_BYTES               = 0x00004822,
+       GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
+       GUEST_ACTIVITY_STATE            = 0x00004826,
+       GUEST_SYSENTER_CS               = 0x0000482a,
+       HOST_IA32_SYSENTER_CS           = 0x00004c00,
+       CR0_GUEST_HOST_MASK             = 0x00006000,
+       CR4_GUEST_HOST_MASK             = 0x00006002,
+       CR0_READ_SHADOW                 = 0x00006004,
+       CR4_READ_SHADOW                 = 0x00006006,
+       CR3_TARGET_VALUE0               = 0x00006008,
+       CR3_TARGET_VALUE1               = 0x0000600a,
+       CR3_TARGET_VALUE2               = 0x0000600c,
+       CR3_TARGET_VALUE3               = 0x0000600e,
+       EXIT_QUALIFICATION              = 0x00006400,
+       GUEST_LINEAR_ADDRESS            = 0x0000640a,
+       GUEST_CR0                       = 0x00006800,
+       GUEST_CR3                       = 0x00006802,
+       GUEST_CR4                       = 0x00006804,
+       GUEST_ES_BASE                   = 0x00006806,
+       GUEST_CS_BASE                   = 0x00006808,
+       GUEST_SS_BASE                   = 0x0000680a,
+       GUEST_DS_BASE                   = 0x0000680c,
+       GUEST_FS_BASE                   = 0x0000680e,
+       GUEST_GS_BASE                   = 0x00006810,
+       GUEST_LDTR_BASE                 = 0x00006812,
+       GUEST_TR_BASE                   = 0x00006814,
+       GUEST_GDTR_BASE                 = 0x00006816,
+       GUEST_IDTR_BASE                 = 0x00006818,
+       GUEST_DR7                       = 0x0000681a,
+       GUEST_RSP                       = 0x0000681c,
+       GUEST_RIP                       = 0x0000681e,
+       GUEST_RFLAGS                    = 0x00006820,
+       GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
+       GUEST_SYSENTER_ESP              = 0x00006824,
+       GUEST_SYSENTER_EIP              = 0x00006826,
+       HOST_CR0                        = 0x00006c00,
+       HOST_CR3                        = 0x00006c02,
+       HOST_CR4                        = 0x00006c04,
+       HOST_FS_BASE                    = 0x00006c06,
+       HOST_GS_BASE                    = 0x00006c08,
+       HOST_TR_BASE                    = 0x00006c0a,
+       HOST_GDTR_BASE                  = 0x00006c0c,
+       HOST_IDTR_BASE                  = 0x00006c0e,
+       HOST_IA32_SYSENTER_ESP          = 0x00006c10,
+       HOST_IA32_SYSENTER_EIP          = 0x00006c12,
+       HOST_RSP                        = 0x00006c14,
+       HOST_RIP                        = 0x00006c16,
+};
+
+#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
+
+#define EXIT_REASON_EXCEPTION_NMI       0
+#define EXIT_REASON_EXTERNAL_INTERRUPT  1
+
+#define EXIT_REASON_PENDING_INTERRUPT   7
+
+#define EXIT_REASON_TASK_SWITCH         9
+#define EXIT_REASON_CPUID               10
+#define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVLPG              14
+#define EXIT_REASON_RDPMC               15
+#define EXIT_REASON_RDTSC               16
+#define EXIT_REASON_VMCALL              18
+#define EXIT_REASON_VMCLEAR             19
+#define EXIT_REASON_VMLAUNCH            20
+#define EXIT_REASON_VMPTRLD             21
+#define EXIT_REASON_VMPTRST             22
+#define EXIT_REASON_VMREAD              23
+#define EXIT_REASON_VMRESUME            24
+#define EXIT_REASON_VMWRITE             25
+#define EXIT_REASON_VMOFF               26
+#define EXIT_REASON_VMON                27
+#define EXIT_REASON_CR_ACCESS           28
+#define EXIT_REASON_DR_ACCESS           29
+#define EXIT_REASON_IO_INSTRUCTION      30
+#define EXIT_REASON_MSR_READ            31
+#define EXIT_REASON_MSR_WRITE           32
+#define EXIT_REASON_MWAIT_INSTRUCTION   36
+
+/*
+ * Interruption-information format
+ */
+#define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
+#define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
+#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
+#define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
+
+#define VECTORING_INFO_VECTOR_MASK             INTR_INFO_VECTOR_MASK
+#define VECTORING_INFO_TYPE_MASK               INTR_INFO_INTR_TYPE_MASK
+#define VECTORING_INFO_DELIEVER_CODE_MASK      INTR_INFO_DELIEVER_CODE_MASK
+#define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
+
+#define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+#define INTR_TYPE_EXCEPTION             (3 << 8) /* processor exception */
+
+/*
+ * Exit Qualifications for MOV for Control Register Access
+ */
+#define CONTROL_REG_ACCESS_NUM          0x7     /* 2:0, number of control register */
+#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
+#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
+#define LMSW_SOURCE_DATA_SHIFT 16
+#define LMSW_SOURCE_DATA  (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
+#define REG_EAX                         (0 << 8)
+#define REG_ECX                         (1 << 8)
+#define REG_EDX                         (2 << 8)
+#define REG_EBX                         (3 << 8)
+#define REG_ESP                         (4 << 8)
+#define REG_EBP                         (5 << 8)
+#define REG_ESI                         (6 << 8)
+#define REG_EDI                         (7 << 8)
+#define REG_R8                         (8 << 8)
+#define REG_R9                         (9 << 8)
+#define REG_R10                        (10 << 8)
+#define REG_R11                        (11 << 8)
+#define REG_R12                        (12 << 8)
+#define REG_R13                        (13 << 8)
+#define REG_R14                        (14 << 8)
+#define REG_R15                        (15 << 8)
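+
+/* Illustrative decode of an EXIT_QUALIFICATION value 'qual' for a CR access:
+ *
+ *	cr   =  qual & CONTROL_REG_ACCESS_NUM;
+ *	type = (qual & CONTROL_REG_ACCESS_TYPE) >> 4;
+ *	reg  = (qual & CONTROL_REG_ACCESS_REG) >> 8;
+ */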
+
+/*
+ * Exit Qualifications for MOV for Debug Register Access
+ */
+#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
+#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
+#define TYPE_MOV_TO_DR                  (0 << 4)
+#define TYPE_MOV_FROM_DR                (1 << 4)
+#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
+
+
+/* segment AR */
+#define SEGMENT_AR_L_MASK (1 << 13)
+
+/* entry controls */
+#define VM_ENTRY_CONTROLS_IA32E_MASK (1 << 9)
+
+#define AR_TYPE_ACCESSES_MASK 1
+#define AR_TYPE_READABLE_MASK (1 << 1)
+#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define AR_TYPE_CODE_MASK (1 << 3)
+#define AR_TYPE_MASK 0x0f
+#define AR_TYPE_BUSY_64_TSS 11
+#define AR_TYPE_BUSY_32_TSS 11
+#define AR_TYPE_BUSY_16_TSS 3
+#define AR_TYPE_LDT 2
+
+#define AR_UNUSABLE_MASK (1 << 16)
+#define AR_S_MASK (1 << 4)
+#define AR_P_MASK (1 << 7)
+#define AR_L_MASK (1 << 13)
+#define AR_DB_MASK (1 << 14)
+#define AR_G_MASK (1 << 15)
+#define AR_DPL_SHIFT 5
+#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
+
+#define AR_RESERVD_MASK 0xfffe0f00
+
+#endif
diff --git a/kern/drivers/dev/vm.c b/kern/drivers/dev/vm.c
index 2427a18..39cfc20 100644 (file)
 #include <event.h>
 #include <umem.h>
 #include <devalarm.h>
+#include <arch/types.h>
+#include <arch/litevm.h>
+#include <arch/emulate.h>
+#include <arch/vmdebug.h>
 
 /* qid path types */
 enum {