vm mmu support
author Ronald G. Minnich <rminnich@google.com>
Mon, 6 Jan 2014 23:52:32 +0000 (15:52 -0800)
committer Barret Rhoden <brho@cs.berkeley.edu>
Fri, 17 Jan 2014 22:35:29 +0000 (14:35 -0800)
Signed-off-by: Ronald G. Minnich <rminnich@google.com>
kern/arch/x86/Kbuild
kern/arch/x86/paging_tmpl.h [new file with mode: 0644]
kern/arch/x86/vm.c
kern/arch/x86/vm.h
kern/arch/x86/vm_mmu.c [new file with mode: 0644]
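The heart of this change is a port of KVM's shadow MMU: a guest-virtual address is first walked through the guest's own page tables to a guest-physical address, which is then mapped through the memory slots to a host-physical address that can go into a shadow pte. Purely as an illustration (this sketch is not part of the patch), the chain mirrors the gva_to_hpa() helper added in vm_mmu.c below:

        /* Sketch only: how a guest-virtual address becomes host-physical.
         * Mirrors gva_to_hpa() in vm_mmu.c. */
        static hpa_t translate_sketch(struct litevm_vcpu *vcpu, gva_t gva)
        {
                /* 1. walk the guest page tables (paging64/paging32/nonpaging) */
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

                if (gpa == UNMAPPED_GVA)
                        return UNMAPPED_GVA;    /* guest fault, nothing to shadow */
                /* 2. memslot lookup; sets HPA_ERR_MASK if no slot backs the gpa */
                return gpa_to_hpa(vcpu, gpa);
        }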

diff --git a/kern/arch/x86/Kbuild b/kern/arch/x86/Kbuild
index f715a05..1b61cf8 100644 (file)
@@ -22,4 +22,5 @@ obj-y                                         += smp_boot.o
 obj-y                                          += smp_entry$(BITS).o
 obj-y                                          += trap.o trap$(BITS).o
 obj-y                                          += trapentry$(BITS).o
-#obj-y                                         += vm.o
+obj-y                                          += vm.o
+obj-y                                          += vm_mmu.o
diff --git a/kern/arch/x86/paging_tmpl.h b/kern/arch/x86/paging_tmpl.h
new file mode 100644 (file)
index 0000000..2fda84c
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+ * We need the mmu code to access both 32-bit and 64-bit guest ptes,
+ * so the code in this file is compiled twice, once per pte size.
+ */
+
+#if PTTYPE == 64
+       #define pt_element_t uint64_t
+       #define guest_walker guest_walker64
+       #define FNAME(name) paging##64_##name
+       #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+       #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
+       #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+       #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
+       #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
+       #define PT_NON_PTE_COPY_MASK PT64_NON_PTE_COPY_MASK
+#elif PTTYPE == 32
+       #define pt_element_t uint32_t
+       #define guest_walker guest_walker32
+       #define FNAME(name) paging##32_##name
+       #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
+       #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
+       #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
+       #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+       #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
+       #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
+       #define PT_NON_PTE_COPY_MASK PT32_NON_PTE_COPY_MASK
+#else
+       #error Invalid PTTYPE value
+#endif
+
+/*
+ * The guest_walker structure emulates the behavior of the hardware page
+ * table walker.
+ */
+struct guest_walker {
+       int level;
+       pt_element_t *table;
+       pt_element_t inherited_ar;
+};
+
+static void FNAME(init_walker)(struct guest_walker *walker,
+                              struct litevm_vcpu *vcpu)
+{
+       hpa_t hpa;
+       struct litevm_memory_slot *slot;
+
+       walker->level = vcpu->mmu.root_level;
+       slot = gfn_to_memslot(vcpu->litevm,
+                             (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
+       walker->table = kmap_atomic(ppn2page(hpa >> PAGE_SHIFT));
+
+       ASSERT((!is_long_mode() && is_pae()) ||
+              (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
+
+       walker->table = (pt_element_t *)( (unsigned long)walker->table |
+               (unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
+       walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
+}
+
+static void FNAME(release_walker)(struct guest_walker *walker)
+{
+       kunmap_atomic(walker->table);
+}
+
+static void FNAME(set_pte)(struct litevm_vcpu *vcpu, uint64_t guest_pte,
+                          uint64_t *shadow_pte, uint64_t access_bits)
+{
+       ASSERT(*shadow_pte == 0);
+       access_bits &= guest_pte;
+       *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
+       set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
+                      guest_pte & PT_DIRTY_MASK, access_bits);
+}
+
+static void FNAME(set_pde)(struct litevm_vcpu *vcpu, uint64_t guest_pde,
+                          uint64_t *shadow_pte, uint64_t access_bits,
+                          int index)
+{
+       gpa_t gaddr;
+
+       ASSERT(*shadow_pte == 0);
+       access_bits &= guest_pde;
+       gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
+       if (PTTYPE == 32 && is_cpuid_PSE36())
+               gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
+                       (32 - PT32_DIR_PSE36_SHIFT);
+       *shadow_pte = (guest_pde & PT_NON_PTE_COPY_MASK) |
+                         ((guest_pde & PT_DIR_PAT_MASK) >>
+                                   (PT_DIR_PAT_SHIFT - PT_PAT_SHIFT));
+       set_pte_common(vcpu, shadow_pte, gaddr,
+                      guest_pde & PT_DIRTY_MASK, access_bits);
+}
+
+/*
+ * Fetch a guest pte from a specific level in the paging hierarchy.
+ */
+static pt_element_t *FNAME(fetch_guest)(struct litevm_vcpu *vcpu,
+                                       struct guest_walker *walker,
+                                       int level,
+                                       gva_t addr)
+{
+
+       ASSERT(level > 0  && level <= walker->level);
+
+       for (;;) {
+               int index = PT_INDEX(addr, walker->level);
+               hpa_t paddr;
+
+               ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
+                      ((unsigned long)&walker->table[index] & PAGE_MASK));
+               if (level == walker->level ||
+                   !is_present_pte(walker->table[index]) ||
+                   (walker->level == PT_DIRECTORY_LEVEL &&
+                    (walker->table[index] & PT_PAGE_SIZE_MASK) &&
+                    (PTTYPE == 64 || is_pse())))
+                       return &walker->table[index];
+               if (walker->level != 3 || is_long_mode())
+                       walker->inherited_ar &= walker->table[index];
+               paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
+               kunmap_atomic(walker->table);
+               walker->table = kmap_atomic(ppn2page(paddr >> PAGE_SHIFT));
+               --walker->level;
+       }
+}
+
+/*
+ * Fetch a shadow pte for a specific level in the paging hierarchy.
+ */
+static uint64_t *FNAME(fetch)(struct litevm_vcpu *vcpu, gva_t addr,
+                             struct guest_walker *walker)
+{
+       hpa_t shadow_addr;
+       int level;
+       uint64_t *prev_shadow_ent = NULL;
+
+       shadow_addr = vcpu->mmu.root_hpa;
+       level = vcpu->mmu.shadow_root_level;
+
+       for (; ; level--) {
+               uint32_t index = SHADOW_PT_INDEX(addr, level);
+               uint64_t *shadow_ent = ((uint64_t *)KADDR(shadow_addr)) + index;
+               pt_element_t *guest_ent;
+
+               if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
+                       if (level == PT_PAGE_TABLE_LEVEL)
+                               return shadow_ent;
+                       shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
+                       prev_shadow_ent = shadow_ent;
+                       continue;
+               }
+
+               if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
+                       ASSERT(level == PT32E_ROOT_LEVEL);
+                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
+                                                      PT32_ROOT_LEVEL, addr);
+               } else
+                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
+                                                      level, addr);
+
+               if (!is_present_pte(*guest_ent))
+                       return NULL;
+
+               /* Don't set accessed bit on PAE PDPTRs */
+               if (vcpu->mmu.root_level != 3 || walker->level != 3)
+                       *guest_ent |= PT_ACCESSED_MASK;
+
+               if (level == PT_PAGE_TABLE_LEVEL) {
+
+                       if (walker->level == PT_DIRECTORY_LEVEL) {
+                               if (prev_shadow_ent)
+                                       *prev_shadow_ent |= PT_SHADOW_PS_MARK;
+                               FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
+                                              walker->inherited_ar,
+                                         PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
+                       } else {
+                               ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
+                               FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
+                       }
+                       return shadow_ent;
+               }
+
+               shadow_addr = litevm_mmu_alloc_page(vcpu, shadow_ent);
+               if (!VALID_PAGE(shadow_addr))
+                       return ERR_PTR(-ENOMEM);
+               if (!is_long_mode() && level == 3)
+                       *shadow_ent = shadow_addr |
+                               (*guest_ent & (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK));
+               else {
+                       *shadow_ent = shadow_addr |
+                               (*guest_ent & PT_NON_PTE_COPY_MASK);
+                       *shadow_ent |= (PT_WRITABLE_MASK | PT_USER_MASK);
+               }
+               prev_shadow_ent = shadow_ent;
+       }
+}
+
+/*
+ * The guest faulted for write.  We need to
+ *
+ * - check write permissions
+ * - update the guest pte dirty bit
+ * - update our own dirty page tracking structures
+ */
+static int FNAME(fix_write_pf)(struct litevm_vcpu *vcpu,
+                              uint64_t *shadow_ent,
+                              struct guest_walker *walker,
+                              gva_t addr,
+                              int user)
+{
+       pt_element_t *guest_ent;
+       int writable_shadow;
+       gfn_t gfn;
+
+       if (is_writeble_pte(*shadow_ent))
+               return 0;
+
+       writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
+       if (user) {
+               /*
+                * User mode access.  Fail if it's a kernel page or a read-only
+                * page.
+                */
+               if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
+                       return 0;
+               ASSERT(*shadow_ent & PT_USER_MASK);
+       } else
+               /*
+                * Kernel mode access.  Fail if it's a read-only page and
+                * supervisor write protection is enabled.
+                */
+               if (!writable_shadow) {
+                       if (is_write_protection())
+                               return 0;
+                       *shadow_ent &= ~PT_USER_MASK;
+               }
+
+       guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);
+
+       if (!is_present_pte(*guest_ent)) {
+               *shadow_ent = 0;
+               return 0;
+       }
+
+       gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+       mark_page_dirty(vcpu->litevm, gfn);
+       *shadow_ent |= PT_WRITABLE_MASK;
+       *guest_ent |= PT_DIRTY_MASK;
+
+       return 1;
+}
+
+/*
+ * Page fault handler.  There are several causes for a page fault:
+ *   - there is no shadow pte for the guest pte
+ *   - write access through a shadow pte marked read only so that we can set
+ *     the dirty bit
+ *   - write access to a shadow pte marked read only so we can update the page
+ *     dirty bitmap, when userspace requests it
+ *   - mmio access; in this case we will never install a present shadow pte
+ *   - normal guest page fault due to the guest pte marked not present, not
+ *     writable, or not executable
+ *
+ *  Returns: 1 if we need to emulate the instruction, 0 otherwise
+ */
+static int FNAME(page_fault)(struct litevm_vcpu *vcpu, gva_t addr,
+                              uint32_t error_code)
+{
+       int write_fault = error_code & PFERR_WRITE_MASK;
+       int pte_present = error_code & PFERR_PRESENT_MASK;
+       int user_fault = error_code & PFERR_USER_MASK;
+       struct guest_walker walker;
+       uint64_t *shadow_pte;
+       int fixed;
+
+       /*
+        * Look up the shadow pte for the faulting address.
+        */
+       for (;;) {
+               FNAME(init_walker)(&walker, vcpu);
+               shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
+               if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
+                       nonpaging_flush(vcpu);
+                       FNAME(release_walker)(&walker);
+                       continue;
+               }
+               break;
+       }
+
+       /*
+        * The page is not mapped by the guest.  Let the guest handle it.
+        */
+       if (!shadow_pte) {
+               inject_page_fault(vcpu, addr, error_code);
+               FNAME(release_walker)(&walker);
+               return 0;
+       }
+
+       /*
+        * Update the shadow pte.
+        */
+       if (write_fault)
+               fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
+                                           user_fault);
+       else
+               fixed = fix_read_pf(shadow_pte);
+
+       FNAME(release_walker)(&walker);
+
+       /*
+        * mmio: emulate if accessible, otherwise it's a guest fault.
+        */
+       if (is_io_pte(*shadow_pte)) {
+               if (may_access(*shadow_pte, write_fault, user_fault))
+                       return 1;
+               pgprintk("%s: io work, no access\n", __FUNCTION__);
+               inject_page_fault(vcpu, addr,
+                                 error_code | PFERR_PRESENT_MASK);
+               return 0;
+       }
+
+       /*
+        * A protection fault we could not fix up: reflect it back to the guest.
+        */
+       if (pte_present && !fixed) {
+               inject_page_fault(vcpu, addr, error_code);
+               return 0;
+       }
+
+       ++litevm_stat.pf_fixed;
+
+       return 0;
+}
+
+static gpa_t FNAME(gva_to_gpa)(struct litevm_vcpu *vcpu, gva_t vaddr)
+{
+       struct guest_walker walker;
+       pt_element_t guest_pte;
+       gpa_t gpa;
+
+       FNAME(init_walker)(&walker, vcpu);
+       guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
+                                       vaddr);
+       FNAME(release_walker)(&walker);
+
+       if (!is_present_pte(guest_pte))
+               return UNMAPPED_GVA;
+
+       if (walker.level == PT_DIRECTORY_LEVEL) {
+               ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
+               ASSERT(PTTYPE == 64 || is_pse());
+
+               gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
+                       (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));
+
+               if (PTTYPE == 32 && is_cpuid_PSE36())
+                       gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
+                                       (32 - PT32_DIR_PSE36_SHIFT);
+       } else {
+               gpa = (guest_pte & PT_BASE_ADDR_MASK);
+               gpa |= (vaddr & ~PAGE_MASK);
+       }
+
+       return gpa;
+}
+
+#undef pt_element_t
+#undef guest_walker
+#undef FNAME
+#undef PT_BASE_ADDR_MASK
+#undef PT_INDEX
+#undef SHADOW_PT_INDEX
+#undef PT_LEVEL_MASK
+#undef PT_PTE_COPY_MASK
+#undef PT_NON_PTE_COPY_MASK
+#undef PT_DIR_BASE_ADDR_MASK
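The #undef block above exists because this header is textually included twice from vm_mmu.c, once with PTTYPE 64 and once with PTTYPE 32, producing a 64-bit and a 32-bit guest walker from the same source. A minimal standalone sketch of the same pattern (file and function names here are illustrative, not from this commit):

        /* walk_tmpl.h -- illustrative; integer types come from the includer */
        #if PTTYPE == 64
                #define pt_element_t uint64_t
                #define FNAME(name) walk64_##name
        #elif PTTYPE == 32
                #define pt_element_t uint32_t
                #define FNAME(name) walk32_##name
        #else
                #error Invalid PTTYPE value
        #endif

        static pt_element_t FNAME(read_entry)(pt_element_t *table, int index)
        {
                return table[index];
        }

        #undef pt_element_t
        #undef FNAME

The includer then does exactly what vm_mmu.c does further down:

        #define PTTYPE 64
        #include "walk_tmpl.h"
        #undef PTTYPE

        #define PTTYPE 32
        #include "walk_tmpl.h"
        #undef PTTYPE

after which both walk64_read_entry() and walk32_read_entry() exist.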
diff --git a/kern/arch/x86/vm.c b/kern/arch/x86/vm.c
index c08acf8..9303e7e 100644 (file)
@@ -357,8 +357,7 @@ static void setup_vmcs_descriptor(void)
 
 static void vmcs_clear(struct vmcs *vmcs)
 {
-#warning "__pa"
-       uint64_t phys_addr = 0;//;__pa(vmcs);
+       uint64_t phys_addr = PADDR(vmcs);
        uint8_t error;
 
        asm volatile ("vmclear %1; setna %0"
@@ -391,8 +390,7 @@ static int vcpu_slot(struct litevm_vcpu *vcpu)
  */
 static struct litevm_vcpu *__vcpu_load(struct litevm_vcpu *vcpu)
 {
-#warning "__pa"
-       uint64_t phys_addr = 0; //__pa(vcpu->vmcs);
+       uint64_t phys_addr = PADDR(vcpu->vmcs);
        int cpu;
        cpu = core_id();
 
@@ -439,9 +437,9 @@ static struct litevm_vcpu *vcpu_load(struct litevm *litevm, int vcpu_slot)
 {
        struct litevm_vcpu *vcpu = &litevm->vcpus[vcpu_slot];
 
-       mutex_lock(&vcpu->mutex);
-       if (unlikely(!vcpu->vmcs)) {
-               mutex_unlock(&vcpu->mutex);
+       qlock(&vcpu->mutex);
+       if (!vcpu->vmcs) {
+               qunlock(&vcpu->mutex);
                return 0;
        }
        return __vcpu_load(vcpu);
@@ -450,7 +448,7 @@ static struct litevm_vcpu *vcpu_load(struct litevm *litevm, int vcpu_slot)
 static void vcpu_put(struct litevm_vcpu *vcpu)
 {
        put_cpu();
-       mutex_unlock(&vcpu->mutex);
+       qunlock(&vcpu->mutex);
 }
 
 
@@ -469,7 +467,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 
 static struct vmcs *alloc_vmcs(void)
 {
-       return alloc_vmcs_cpu(smp_processor_id());
+       return alloc_vmcs_cpu(core_id());
 }
 
 static void free_vmcs(struct vmcs *vmcs)
@@ -520,21 +518,20 @@ static int vmx_disabled_by_bios(void)
 {
        uint64_t msr;
 
-       rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+       msr = read_msr(MSR_IA32_FEATURE_CONTROL);
        return (msr & 5) == 1; /* locked but not enabled */
 }
 
 static void litevm_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
-#warning "Need PA of vmxarea"
-       uint64_t phys_addr = NULL; // __pa(per_cpu(vmxarea, cpu));
+       uint64_t phys_addr = PADDR(&currentcpu->vmxarea);
        uint64_t old;
 
-       rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+       old = read_msr(MSR_IA32_FEATURE_CONTROL);
        if ((old & 5) == 0)
                /* enable and lock */
-               wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
+               write_msr(MSR_IA32_FEATURE_CONTROL, old | 5);
        write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile ("vmxon %0" : : "m"(phys_addr) : "memory", "cc");
 }
@@ -558,9 +555,10 @@ static int litevm_dev_open(struct inode *inode, struct file *filp)
        for (i = 0; i < LITEVM_MAX_VCPUS; ++i) {
                struct litevm_vcpu *vcpu = &litevm->vcpus[i];
 
-               mutex_init(&vcpu->mutex);
+               qlock_init(&vcpu->mutex);
                vcpu->mmu.root_hpa = INVALID_PAGE;
-               INIT_LIST_HEAD(&vcpu->free_pages);
+#warning "init list head"
+//             INIT_LIST_HEAD(&vcpu->free_pages);
        }
 #warning "filp->private data -- > c->aux?"
 //     filp->private_data = litevm;
@@ -1372,10 +1370,10 @@ static int litevm_dev_ioctl_create_vcpu(struct litevm *litevm, int n)
 
        vcpu = &litevm->vcpus[n];
 
-       mutex_lock(&vcpu->mutex);
+       qlock(&vcpu->mutex);
 
        if (vcpu->vmcs) {
-               mutex_unlock(&vcpu->mutex);
+               qunlock(&vcpu->mutex);
                return -EEXIST;
        }
 
@@ -1387,7 +1385,7 @@ static int litevm_dev_ioctl_create_vcpu(struct litevm *litevm, int n)
        vcpu->litevm = litevm;
        vmcs = alloc_vmcs();
        if (!vmcs) {
-               mutex_unlock(&vcpu->mutex);
+               qunlock(&vcpu->mutex);
                goto out_free_vcpus;
        }
        vmcs_clear(vmcs);
@@ -1554,6 +1552,7 @@ out:
        return r;
 }
 
+#if 0
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1615,6 +1614,7 @@ out:
        spin_unlock(&litevm->lock);
        return r;
 }
+#endif
 
 struct litevm_memory_slot *gfn_to_memslot(struct litevm *litevm, gfn_t gfn)
 {
@@ -3384,8 +3384,8 @@ out:
 
 static void litevm_exit(void)
 {
-       free_litevm_area();
-       __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+       //free_litevm_area();
+       //__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
 }
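The two MSR_IA32_FEATURE_CONTROL hunks above use the magic constant 5, which is bit 0 (the BIOS lock bit) plus bit 2 (enable VMX outside SMX). A small sketch with the bits named; the FC_* macros are illustrative, while read_msr()/write_msr() are the helpers this commit switches to:

        #define FC_LOCK  (1ULL << 0)    /* BIOS lock bit */
        #define FC_VMXON (1ULL << 2)    /* enable VMX outside SMX */

        static int vmx_usable_sketch(void)
        {
                uint64_t fc = read_msr(MSR_IA32_FEATURE_CONTROL);

                /* locked with VMX left off: what vmx_disabled_by_bios() detects */
                if ((fc & (FC_LOCK | FC_VMXON)) == FC_LOCK)
                        return 0;
                /* not locked yet: enable and lock, as litevm_enable() does */
                if ((fc & (FC_LOCK | FC_VMXON)) == 0)
                        write_msr(MSR_IA32_FEATURE_CONTROL, fc | FC_LOCK | FC_VMXON);
                return 1;
        }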
 
 
diff --git a/kern/arch/x86/vm.h b/kern/arch/x86/vm.h
index b07c5ad..ff19026 100644 (file)
@@ -79,7 +79,7 @@ typedef uint64_t            hpa_t;
 typedef unsigned long  hfn_t;
 
 struct litevm_mmu_page {
-       LIST_HEAD(link, litevm_mmu_page) link;
+       LIST_ENTRY(litevm_mmu_page) link;
        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
@@ -149,7 +149,7 @@ enum {
 struct litevm_vcpu {
        struct litevm *litevm;
        struct vmcs *vmcs;
-       qlock_t mutex; //struct mutex mutex;
+       qlock_t mutex;
        int   cpu;
        int   launched;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
@@ -166,7 +166,7 @@ struct litevm_vcpu {
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;
-       LIST_HEAD(free_pages, free_pages) free_pages;
+       LIST_HEAD(free_pages, litevm_mmu_page) link;
        //struct list_head free_pages;
        struct litevm_mmu_page page_header_buf[LITEVM_NUM_MMU_PAGES];
        struct litevm_mmu mmu;
@@ -207,7 +207,7 @@ struct litevm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct litevm_memory_slot memslots[LITEVM_MEMORY_SLOTS];
-       LIST_HEAD(active_mmu_pages, active_mmu_pages) active_mmu_pages;
+       LIST_HEAD(active_mmu_pages, litevm_mmu_page) link;
        //struct list_head active_mmu_pages;
        struct litevm_vcpu vcpus[LITEVM_MAX_VCPUS];
        int memory_config_version;
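The list changes above are what make the BSD <sys/queue.h> macros used throughout vm_mmu.c (LIST_INSERT_HEAD, LIST_REMOVE, LIST_FOREACH, LIST_FIRST) line up: each element embeds a LIST_ENTRY and the owner declares a LIST_HEAD. A self-contained sketch of the pattern, with illustrative type and field names:

        #include <sys/queue.h>

        struct demo_page {
                LIST_ENTRY(demo_page) link;     /* embedded linkage */
                int id;
        };

        LIST_HEAD(demo_list, demo_page);        /* declares struct demo_list */

        static void demo(void)
        {
                struct demo_list free_pages = LIST_HEAD_INITIALIZER(free_pages);
                struct demo_page a = { .id = 1 }, b = { .id = 2 }, *p;

                LIST_INSERT_HEAD(&free_pages, &a, link);
                LIST_INSERT_HEAD(&free_pages, &b, link);
                LIST_FOREACH(p, &free_pages, link)
                        ;                       /* visits b, then a */
                LIST_REMOVE(&a, link);
        }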
diff --git a/kern/arch/x86/vm_mmu.c b/kern/arch/x86/vm_mmu.c
new file mode 100644 (file)
index 0000000..85b2afd
--- /dev/null
@@ -0,0 +1,739 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This module enables machines with Intel VT-x extensions to run virtual
+ * machines without emulation or binary translation.
+ *
+ * MMU support
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ *
+ * Authors:
+ *   Yaniv Kamay  <yaniv@qumranet.com>
+ *   Avi Kivity   <avi@qumranet.com>
+ *
+ */
+#define DEBUG
+#include <kmalloc.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+#include <error.h>
+#include <pmap.h>
+#include <sys/queue.h>
+#include <smp.h>
+#include <kref.h>
+#include <atomic.h>
+#include <alarm.h>
+#include <event.h>
+#include <umem.h>
+#include <devalarm.h>
+#include <arch/types.h>
+#include <arch/vm.h>
+#include <arch/emulate.h>
+#include <arch/vmdebug.h>
+#include <arch/msr-index.h>
+
+#define pgprintk(x...) do { } while (0)
+
+#define ASSERT(x)                                                      \
+       if (!(x)) {                                                     \
+               printd( "assertion failed %s:%d: %s\n", \
+                      __FILE__, __LINE__, #x);                         \
+       }
+
+#define PT64_ENT_PER_PAGE 512
+#define PT32_ENT_PER_PAGE 1024
+
+#define PT_WRITABLE_SHIFT 1
+
+#define PT_PRESENT_MASK (1ULL << 0)
+#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
+#define PT_USER_MASK (1ULL << 2)
+#define PT_PWT_MASK (1ULL << 3)
+#define PT_PCD_MASK (1ULL << 4)
+#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_DIRTY_MASK (1ULL << 6)
+#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAT_MASK (1ULL << 7)
+#define PT_GLOBAL_MASK (1ULL << 8)
+#define PT64_NX_MASK (1ULL << 63)
+
+#define PT_PAT_SHIFT 7
+#define PT_DIR_PAT_SHIFT 12
+#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
+
+#define PT32_DIR_PSE36_SIZE 4
+#define PT32_DIR_PSE36_SHIFT 13
+#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+
+
+#define PT32_PTE_COPY_MASK \
+       (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
+       PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
+       PT_GLOBAL_MASK )
+
+#define PT32_NON_PTE_COPY_MASK \
+       (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
+       PT_ACCESSED_MASK | PT_DIRTY_MASK)
+
+
+#define PT64_PTE_COPY_MASK \
+       (PT64_NX_MASK | PT32_PTE_COPY_MASK)
+
+#define PT64_NON_PTE_COPY_MASK \
+       (PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)
+
+
+
+#define PT_FIRST_AVAIL_BITS_SHIFT 9
+#define PT64_SECOND_AVAIL_BITS_SHIFT 52
+
+#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+
+#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
+#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
+
+#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
+#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
+
+#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
+
+#define VALID_PAGE(x) ((x) != INVALID_PAGE)
+
+#define PT64_LEVEL_BITS 9
+
+#define PT64_LEVEL_SHIFT(level) \
+               ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
+
+#define PT64_LEVEL_MASK(level) \
+               (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
+
+#define PT64_INDEX(address, level)\
+       (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
+
+
+#define PT32_LEVEL_BITS 10
+
+#define PT32_LEVEL_SHIFT(level) \
+               ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
+
+#define PT32_LEVEL_MASK(level) \
+               (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
+
+#define PT32_INDEX(address, level)\
+       (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
+
+
+#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
+#define PT64_DIR_BASE_ADDR_MASK \
+       (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
+
+#define PT32_BASE_ADDR_MASK PAGE_MASK
+#define PT32_DIR_BASE_ADDR_MASK \
+       (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
+
+
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+
+#define PT64_ROOT_LEVEL 4
+#define PT32_ROOT_LEVEL 2
+#define PT32E_ROOT_LEVEL 3
+
+#define PT_DIRECTORY_LEVEL 2
+#define PT_PAGE_TABLE_LEVEL 1
+
+static int is_write_protection(void)
+{
+       return guest_cr0() & CR0_WP_MASK;
+}
+
+static int is_cpuid_PSE36(void)
+{
+       return 1;
+}
+
+static int is_present_pte(unsigned long pte)
+{
+       return pte & PT_PRESENT_MASK;
+}
+
+static int is_writeble_pte(unsigned long pte)
+{
+       return pte & PT_WRITABLE_MASK;
+}
+
+static int is_io_pte(unsigned long pte)
+{
+       return pte & PT_SHADOW_IO_MARK;
+}
+
+static void litevm_mmu_free_page(struct litevm_vcpu *vcpu, hpa_t page_hpa)
+{
+       struct litevm_mmu_page *page_head = page_header(page_hpa);
+
+       //SLIST_REMOVE(&vcpu->free_pages, page_head, 
+       //           litevm_mmu_page, link);
+       //list_del(&page_head->link);
+       page_head->page_hpa = page_hpa;
+       //list_add(&page_head->link, &vcpu->free_pages);
+       LIST_INSERT_HEAD(&vcpu->link, page_head, link);
+}
+
+static int is_empty_shadow_page(hpa_t page_hpa)
+{
+       uint32_t *pos;
+       uint32_t *end;
+       for (pos = KADDR(page_hpa), end = pos + PAGE_SIZE / sizeof(uint32_t);
+                     pos != end; pos++)
+               if (*pos != 0)
+                       return 0;
+       return 1;
+}
+
+static hpa_t litevm_mmu_alloc_page(struct litevm_vcpu *vcpu,
+                                  uint64_t *parent_pte)
+{
+       struct litevm_mmu_page *page;
+
+       if (LIST_EMPTY(&vcpu->link))
+               return INVALID_PAGE;
+
+       page = LIST_FIRST(&vcpu->link);
+       LIST_REMOVE(page, link);
+       LIST_INSERT_HEAD(&vcpu->litevm->link, page, link);
+       ASSERT(is_empty_shadow_page(page->page_hpa));
+       page->slot_bitmap = 0;
+       page->global = 1;
+       page->parent_pte = parent_pte;
+       return page->page_hpa;
+}
+
+static void page_header_update_slot(struct litevm *litevm, void *pte, gpa_t gpa)
+{
+       int slot = memslot_id(litevm, gfn_to_memslot(litevm, gpa >> PAGE_SHIFT));
+       struct litevm_mmu_page *page_head = page_header(PADDR(pte));
+
+       __set_bit(slot, &page_head->slot_bitmap);
+}
+
+hpa_t safe_gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa)
+{
+       hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+
+       return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
+}
+
+hpa_t gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa)
+{
+       struct litevm_memory_slot *slot;
+       struct page *page;
+
+       ASSERT((gpa & HPA_ERR_MASK) == 0);
+       slot = gfn_to_memslot(vcpu->litevm, gpa >> PAGE_SHIFT);
+       if (!slot)
+               return gpa | HPA_ERR_MASK;
+       page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
+       return ((hpa_t)page2ppn(page) << PAGE_SHIFT)
+               | (gpa & (PAGE_SIZE-1));
+}
+
+hpa_t gva_to_hpa(struct litevm_vcpu *vcpu, gva_t gva)
+{
+       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+       if (gpa == UNMAPPED_GVA)
+               return UNMAPPED_GVA;
+       return gpa_to_hpa(vcpu, gpa);
+}
+
+
+static void release_pt_page_64(struct litevm_vcpu *vcpu, hpa_t page_hpa,
+                              int level)
+{
+       ASSERT(vcpu);
+       ASSERT(VALID_PAGE(page_hpa));
+       ASSERT(level <= PT64_ROOT_LEVEL && level > 0);
+
+       if (level == 1)
+               memset(KADDR(page_hpa), 0, PAGE_SIZE);
+       else {
+               uint64_t *pos;
+               uint64_t *end;
+
+               for (pos = KADDR(page_hpa), end = pos + PT64_ENT_PER_PAGE;
+                    pos != end; pos++) {
+                       uint64_t current_ent = *pos;
+
+                       *pos = 0;
+                       if (is_present_pte(current_ent))
+                               release_pt_page_64(vcpu,
+                                                 current_ent &
+                                                 PT64_BASE_ADDR_MASK,
+                                                 level - 1);
+               }
+       }
+       litevm_mmu_free_page(vcpu, page_hpa);
+}
+
+static void nonpaging_new_cr3(struct litevm_vcpu *vcpu)
+{
+}
+
+static int nonpaging_map(struct litevm_vcpu *vcpu, gva_t v, hpa_t p)
+{
+       int level = PT32E_ROOT_LEVEL;
+       hpa_t table_addr = vcpu->mmu.root_hpa;
+
+       for (; ; level--) {
+               uint32_t index = PT64_INDEX(v, level);
+               uint64_t *table;
+
+               ASSERT(VALID_PAGE(table_addr));
+               table = KADDR(table_addr);
+
+               if (level == 1) {
+                       mark_page_dirty(vcpu->litevm, v >> PAGE_SHIFT);
+                       page_header_update_slot(vcpu->litevm, table, v);
+                       table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+                                                               PT_USER_MASK;
+                       return 0;
+               }
+
+               if (table[index] == 0) {
+                       hpa_t new_table = litevm_mmu_alloc_page(vcpu,
+                                                            &table[index]);
+
+                       if (!VALID_PAGE(new_table)) {
+                               pgprintk("nonpaging_map: ENOMEM\n");
+                               return -ENOMEM;
+                       }
+
+                       if (level == PT32E_ROOT_LEVEL)
+                               table[index] = new_table | PT_PRESENT_MASK;
+                       else
+                               table[index] = new_table | PT_PRESENT_MASK |
+                                               PT_WRITABLE_MASK | PT_USER_MASK;
+               }
+               table_addr = table[index] & PT64_BASE_ADDR_MASK;
+       }
+}
+
+static void nonpaging_flush(struct litevm_vcpu *vcpu)
+{
+       hpa_t root = vcpu->mmu.root_hpa;
+
+       ++litevm_stat.tlb_flush;
+       pgprintk("nonpaging_flush\n");
+       ASSERT(VALID_PAGE(root));
+       release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
+       root = litevm_mmu_alloc_page(vcpu, 0);
+       ASSERT(VALID_PAGE(root));
+       vcpu->mmu.root_hpa = root;
+       if (is_paging())
+               root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
+       vmcs_writel(GUEST_CR3, root);
+}
+
+static gpa_t nonpaging_gva_to_gpa(struct litevm_vcpu *vcpu, gva_t vaddr)
+{
+       return vaddr;
+}
+
+static int nonpaging_page_fault(struct litevm_vcpu *vcpu, gva_t gva,
+                              uint32_t error_code)
+{
+       int ret;
+       gpa_t addr = gva;
+
+       ASSERT(vcpu);
+       ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+
+       for (;;) {
+            hpa_t paddr;
+
+            paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
+
+            if (is_error_hpa(paddr))
+                    return 1;
+
+            ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
+            if (ret) {
+                    nonpaging_flush(vcpu);
+                    continue;
+            }
+            break;
+       }
+       return ret;
+}
+
+static void nonpaging_inval_page(struct litevm_vcpu *vcpu, gva_t addr)
+{
+}
+
+static void nonpaging_free(struct litevm_vcpu *vcpu)
+{
+       hpa_t root;
+
+       ASSERT(vcpu);
+       root = vcpu->mmu.root_hpa;
+       if (VALID_PAGE(root))
+               release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
+       vcpu->mmu.root_hpa = INVALID_PAGE;
+}
+
+static int nonpaging_init_context(struct litevm_vcpu *vcpu)
+{
+       struct litevm_mmu *context = &vcpu->mmu;
+
+       context->new_cr3 = nonpaging_new_cr3;
+       context->page_fault = nonpaging_page_fault;
+       context->inval_page = nonpaging_inval_page;
+       context->gva_to_gpa = nonpaging_gva_to_gpa;
+       context->free = nonpaging_free;
+       context->root_level = PT32E_ROOT_LEVEL;
+       context->shadow_root_level = PT32E_ROOT_LEVEL;
+       context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
+       ASSERT(VALID_PAGE(context->root_hpa));
+       vmcs_writel(GUEST_CR3, context->root_hpa);
+       return 0;
+}
+
+
+static void litevm_mmu_flush_tlb(struct litevm_vcpu *vcpu)
+{
+       struct litevm_mmu_page *page, *npage;
+
+       //list_for_each_entry_safe(page, npage, &vcpu->litevm->active_mmu_pages,
+       LIST_FOREACH_SAFE(page, &vcpu->litevm->link,
+                                link, npage) {
+               if (page->global)
+                       continue;
+
+               if (!page->parent_pte)
+                       continue;
+
+               *page->parent_pte = 0;
+               release_pt_page_64(vcpu, page->page_hpa, 1);
+       }
+       ++litevm_stat.tlb_flush;
+}
+
+static void paging_new_cr3(struct litevm_vcpu *vcpu)
+{
+       litevm_mmu_flush_tlb(vcpu);
+}
+
+static void mark_pagetable_nonglobal(void *shadow_pte)
+{
+       page_header(PADDR(shadow_pte))->global = 0;
+}
+
+static inline void set_pte_common(struct litevm_vcpu *vcpu,
+                            uint64_t *shadow_pte,
+                            gpa_t gaddr,
+                            int dirty,
+                            uint64_t access_bits)
+{
+       hpa_t paddr;
+
+       *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
+       if (!dirty)
+               access_bits &= ~PT_WRITABLE_MASK;
+
+       if (access_bits & PT_WRITABLE_MASK)
+               mark_page_dirty(vcpu->litevm, gaddr >> PAGE_SHIFT);
+
+       *shadow_pte |= access_bits;
+
+       paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+
+       if (!(*shadow_pte & PT_GLOBAL_MASK))
+               mark_pagetable_nonglobal(shadow_pte);
+
+       if (is_error_hpa(paddr)) {
+               *shadow_pte |= gaddr;
+               *shadow_pte |= PT_SHADOW_IO_MARK;
+               *shadow_pte &= ~PT_PRESENT_MASK;
+       } else {
+               *shadow_pte |= paddr;
+               page_header_update_slot(vcpu->litevm, shadow_pte, gaddr);
+       }
+}
+
+static void inject_page_fault(struct litevm_vcpu *vcpu,
+                             uint64_t addr,
+                             uint32_t err_code)
+{
+       uint32_t vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+
+       pgprintk("inject_page_fault: 0x%llx err 0x%x\n", addr, err_code);
+
+       ++litevm_stat.pf_guest;
+
+       if (is_page_fault(vect_info)) {
+               printd( "inject_page_fault: "
+                      "double fault 0x%llx @ 0x%lx\n",
+                      addr, vmcs_readl(GUEST_RIP));
+               vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
+               vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+                            DF_VECTOR |
+                            INTR_TYPE_EXCEPTION |
+                            INTR_INFO_DELIEVER_CODE_MASK |
+                            INTR_INFO_VALID_MASK);
+               return;
+       }
+       vcpu->cr2 = addr;
+       vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+                    PF_VECTOR |
+                    INTR_TYPE_EXCEPTION |
+                    INTR_INFO_DELIEVER_CODE_MASK |
+                    INTR_INFO_VALID_MASK);
+
+}
+
+static inline int fix_read_pf(uint64_t *shadow_ent)
+{
+       if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
+           !(*shadow_ent & PT_USER_MASK)) {
+               /*
+                * If supervisor write protect is disabled, we shadow kernel
+                * pages as user pages so we can trap the write access.
+                */
+               *shadow_ent |= PT_USER_MASK;
+               *shadow_ent &= ~PT_WRITABLE_MASK;
+
+               return 1;
+
+       }
+       return 0;
+}
+
+static int may_access(uint64_t pte, int write, int user)
+{
+
+       if (user && !(pte & PT_USER_MASK))
+               return 0;
+       if (write && !(pte & PT_WRITABLE_MASK))
+               return 0;
+       return 1;
+}
+
+/*
+ * Remove a shadow pte.
+ */
+static void paging_inval_page(struct litevm_vcpu *vcpu, gva_t addr)
+{
+       hpa_t page_addr = vcpu->mmu.root_hpa;
+       int level = vcpu->mmu.shadow_root_level;
+
+       ++litevm_stat.invlpg;
+
+       for (; ; level--) {
+               uint32_t index = PT64_INDEX(addr, level);
+               uint64_t *table = KADDR(page_addr);
+
+               if (level == PT_PAGE_TABLE_LEVEL ) {
+                       table[index] = 0;
+                       return;
+               }
+
+               if (!is_present_pte(table[index]))
+                       return;
+
+               page_addr = table[index] & PT64_BASE_ADDR_MASK;
+
+               if (level == PT_DIRECTORY_LEVEL &&
+                         (table[index] & PT_SHADOW_PS_MARK)) {
+                       table[index] = 0;
+                       release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);
+
+                       //flush tlb
+                       vmcs_writel(GUEST_CR3, vcpu->mmu.root_hpa |
+                                   (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+                       return;
+               }
+       }
+}
+
+static void paging_free(struct litevm_vcpu *vcpu)
+{
+       nonpaging_free(vcpu);
+}
+
+#define PTTYPE 64
+#include "paging_tmpl.h"
+#undef PTTYPE
+
+#define PTTYPE 32
+#include "paging_tmpl.h"
+#undef PTTYPE
+
+static int paging64_init_context(struct litevm_vcpu *vcpu)
+{
+       struct litevm_mmu *context = &vcpu->mmu;
+
+       ASSERT(is_pae());
+       context->new_cr3 = paging_new_cr3;
+       context->page_fault = paging64_page_fault;
+       context->inval_page = paging_inval_page;
+       context->gva_to_gpa = paging64_gva_to_gpa;
+       context->free = paging_free;
+       context->root_level = PT64_ROOT_LEVEL;
+       context->shadow_root_level = PT64_ROOT_LEVEL;
+       context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
+       ASSERT(VALID_PAGE(context->root_hpa));
+       vmcs_writel(GUEST_CR3, context->root_hpa |
+                   (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+       return 0;
+}
+
+static int paging32_init_context(struct litevm_vcpu *vcpu)
+{
+       struct litevm_mmu *context = &vcpu->mmu;
+
+       context->new_cr3 = paging_new_cr3;
+       context->page_fault = paging32_page_fault;
+       context->inval_page = paging_inval_page;
+       context->gva_to_gpa = paging32_gva_to_gpa;
+       context->free = paging_free;
+       context->root_level = PT32_ROOT_LEVEL;
+       context->shadow_root_level = PT32E_ROOT_LEVEL;
+       context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
+       ASSERT(VALID_PAGE(context->root_hpa));
+       vmcs_writel(GUEST_CR3, context->root_hpa |
+                   (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+       return 0;
+}
+
+static int paging32E_init_context(struct litevm_vcpu *vcpu)
+{
+       int ret;
+
+       if ((ret = paging64_init_context(vcpu)))
+               return ret;
+
+       vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
+       vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
+       return 0;
+}
+
+static int init_litevm_mmu(struct litevm_vcpu *vcpu)
+{
+       ASSERT(vcpu);
+       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+
+       if (!is_paging())
+               return nonpaging_init_context(vcpu);
+       else if (is_long_mode())
+               return paging64_init_context(vcpu);
+       else if (is_pae())
+               return paging32E_init_context(vcpu);
+       else
+               return paging32_init_context(vcpu);
+}
+
+static void destroy_litevm_mmu(struct litevm_vcpu *vcpu)
+{
+       ASSERT(vcpu);
+       if (VALID_PAGE(vcpu->mmu.root_hpa)) {
+               vcpu->mmu.free(vcpu);
+               vcpu->mmu.root_hpa = INVALID_PAGE;
+       }
+}
+
+int litevm_mmu_reset_context(struct litevm_vcpu *vcpu)
+{
+       destroy_litevm_mmu(vcpu);
+       return init_litevm_mmu(vcpu);
+}
+
+static void free_mmu_pages(struct litevm_vcpu *vcpu)
+{
+       while (!LIST_EMPTY(&vcpu->link)) {
+               struct litevm_mmu_page *page;
+
+               page = LIST_FIRST(&vcpu->link);
+               LIST_REMOVE(page, link);
+               __free_page(ppn2page(page->page_hpa >> PAGE_SHIFT));
+               page->page_hpa = INVALID_PAGE;
+       }
+}
+
+static int alloc_mmu_pages(struct litevm_vcpu *vcpu)
+{
+       int i;
+
+       ASSERT(vcpu);
+
+       /* We could try a contiguous allocation, but the pages don't need
+        * to be contiguous.
+        */
+       for (i = 0; i < LITEVM_NUM_MMU_PAGES; i++) {
+               struct page *page;
+               struct litevm_mmu_page *page_header = &vcpu->page_header_buf[i];
+
+               /* sys/queue.h entries need no init before LIST_INSERT_HEAD */
+               if ((page = kpage_alloc_addr()) == NULL)
+                       goto error_1;
+               page->pg_private = (uintptr_t)page_header;
+               page_header->page_hpa = (hpa_t)page2ppn(page) << PAGE_SHIFT;
+               memset(KADDR(page_header->page_hpa), 0, PAGE_SIZE);
+               LIST_INSERT_HEAD(&vcpu->link, page_header, link);
+       }
+       return 0;
+
+error_1:
+       free_mmu_pages(vcpu);
+       return -ENOMEM;
+}
+
+int litevm_mmu_init(struct litevm_vcpu *vcpu)
+{
+       int r;
+
+       ASSERT(vcpu);
+       ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+       ASSERT(LIST_EMPTY(&vcpu->link));
+
+       if ((r = alloc_mmu_pages(vcpu)))
+               return r;
+
+       if ((r = init_litevm_mmu(vcpu))) {
+               free_mmu_pages(vcpu);
+               return r;
+       }
+       return 0;
+}
+
+void litevm_mmu_destroy(struct litevm_vcpu *vcpu)
+{
+       ASSERT(vcpu);
+
+       destroy_litevm_mmu(vcpu);
+       free_mmu_pages(vcpu);
+}
+
+void litevm_mmu_slot_remove_write_access(struct litevm *litevm, int slot)
+{
+       struct litevm_mmu_page *page, *link;
+
+       LIST_FOREACH(page, &litevm->link, link) {
+               int i;
+               uint64_t *pt;
+
+               if (!test_bit(slot, &page->slot_bitmap))
+                       continue;
+
+               pt = KADDR(page->page_hpa);
+               for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+                       /* avoid RMW */
+                       if (pt[i] & PT_WRITABLE_MASK)
+                               pt[i] &= ~PT_WRITABLE_MASK;
+
+       }
+}
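For reference, the PT64_LEVEL_SHIFT()/PT64_INDEX() arithmetic defined near the top of vm_mmu.c splits a guest-virtual address into 9-bit table indices, one per level above the 4 KiB page offset. A standalone user-space sketch (not part of the commit) that prints the four indices of an example address:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT 12
        #define PT64_LEVEL_BITS 9
        #define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
        #define PT64_INDEX(addr, level) \
                (((addr) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

        int main(void)
        {
                uint64_t gva = 0x00007f1234567000ULL;
                int level;

                /* level 4 (PML4) down to level 1 (page table): 9 bits each */
                for (level = 4; level >= 1; level--)
                        printf("level %d index %llu\n", level,
                               (unsigned long long)PT64_INDEX(gva, level));
                return 0;
        }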