snapshot: Things now build, with warnings, but don't link.
author Ronald G. Minnich <rminnich@google.com>
Tue, 7 Jan 2014 18:28:27 +0000 (10:28 -0800)
committer Barret Rhoden <brho@cs.berkeley.edu>
Fri, 17 Jan 2014 22:35:29 +0000 (14:35 -0800)
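This snapshot starts converting litevm's guest-memory accesses from Linux's kmap_atomic()/kunmap_atomic() to Akaros: litevm_read_guest(), litevm_write_guest(), and init_rmode_tss() now map guest frames with vmap_pmem()/vunmap_pmem(), the guest page-table walker in paging_tmpl.h relies on physical memory always being kernel-addressable, the vcpu free-page list in vm_mmu.c moves to the BSD LIST_* macros, and vm.o/vm_mmu.o are commented out of the Kbuild until the link errors are sorted out.

A rough sketch of the mapping change, not part of this commit: read_guest_byte() is a made-up helper, and KADDR() and PAGE_MASK are assumed to behave as they do elsewhere in this diff (KADDR() maps a physical address into the kernel, PAGE_MASK == ~(PAGE_SIZE - 1)).

        /*
         * Linux needs a temporary mapping because the guest frame may live in
         * highmem:
         *
         *      uint8_t *va = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT));
         *      uint8_t b = va[paddr & ~PAGE_MASK];
         *      kunmap_atomic(va);
         *
         * On Akaros all physical memory is already mapped in the kernel, so
         * the same access can go straight through the direct map;
         * vmap_pmem()/vunmap_pmem() is the explicit alternative used in
         * litevm_read_guest()/litevm_write_guest().
         */
        static uint8_t read_guest_byte(hpa_t paddr)
        {
                uint8_t *va = KADDR(paddr & PAGE_MASK);

                return va[paddr & ~PAGE_MASK];
        }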
Signed-off-by: Ronald G. Minnich <rminnich@google.com>
kern/arch/x86/Kbuild
kern/arch/x86/paging_tmpl.h
kern/arch/x86/vm.c
kern/arch/x86/vm_mmu.c

diff --git a/kern/arch/x86/Kbuild b/kern/arch/x86/Kbuild
index 1b61cf8..c7f36da 100644
@@ -22,5 +22,5 @@ obj-y                                         += smp_boot.o
 obj-y                                          += smp_entry$(BITS).o
 obj-y                                          += trap.o trap$(BITS).o
 obj-y                                          += trapentry$(BITS).o
-obj-y                                          += vm.o
-obj-y                                          += vm_mmu.o
+#obj-y                                         += vm.o
+#obj-y                                         += vm_mmu.o
diff --git a/kern/arch/x86/paging_tmpl.h b/kern/arch/x86/paging_tmpl.h
index 2fda84c..fd5e336 100644
@@ -49,7 +49,9 @@ static void FNAME(init_walker)(struct guest_walker *walker,
        slot = gfn_to_memslot(vcpu->litevm,
                              (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
-       walker->table = kmap_atomic(ppn2page(hpa >> PAGE_SHIFT));
+       // well, it seems that stuff is always addressable in akaros. I hope.
+       //walker->table = vmap_pmem(ppn2page(hpa >> PAGE_SHIFT), PAGE_SIZE);
+       walker->table = ppn2page(hpa);
 
        ASSERT((!is_long_mode() && is_pae()) ||
               (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
@@ -61,7 +63,7 @@ static void FNAME(init_walker)(struct guest_walker *walker,
 
 static void FNAME(release_walker)(struct guest_walker *walker)
 {
-       kunmap_atomic(walker->table);
+       //vunmap_pmem(walker->table, PAGE_SIZE);
 }
 
 static void FNAME(set_pte)(struct litevm_vcpu *vcpu, uint64_t guest_pte,
@@ -119,8 +121,9 @@ static pt_element_t *FNAME(fetch_guest)(struct litevm_vcpu *vcpu,
                if (walker->level != 3 || is_long_mode())
                        walker->inherited_ar &= walker->table[index];
                paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
-               kunmap_atomic(walker->table);
-               walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT));
+               //kunmap_atomic(walker->table);
+               //walker->table = vmap_pmem(ppn2page(paddr >> PAGE_SHIFT), PAGE_SIZE);
+               walker->table = ppn2page(paddr >> PAGE_SHIFT);
                --walker->level;
        }
 }
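(Sketch, not part of this commit.) The hunks above drop the kmap_atomic()/kunmap_atomic() pair around walker->table on the comment's assumption that guest page-table frames are always kernel-addressable in Akaros. Since walker->table is indexed as a PTE array, one walk step through the direct map could look like the following; read_guest_pte() is a made-up helper, and KADDR()/PAGE_MASK are assumed to work as in the rest of this diff.

        static pt_element_t read_guest_pte(hpa_t table_hpa, int index)
        {
                /* The frame is reached through the kernel's direct map; no
                 * temporary mapping has to be set up or torn down. */
                pt_element_t *table = KADDR(table_hpa & PAGE_MASK);

                return table[index];
        }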
diff --git a/kern/arch/x86/vm.c b/kern/arch/x86/vm.c
index 9303e7e..d65c5f0 100644
@@ -296,9 +296,8 @@ int litevm_read_guest(struct litevm_vcpu *vcpu,
 
                if (is_error_hpa(paddr))
                        break;
-#warning "kmap_atomic"
-               guest_buf = NULL; //(hva_t)kmap_atomic(
-               //      pfn_to_page(paddr >> PAGE_SHIFT));
+               guest_buf = (hva_t)vmap_pmem(
+                       ppn2page(paddr >> PAGE_SHIFT), PAGE_SIZE);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = MIN(size, PAGE_SIZE - offset);
@@ -306,8 +305,7 @@ int litevm_read_guest(struct litevm_vcpu *vcpu,
                host_buf += now;
                addr += now;
                size -= now;
-#warning "kunmap_atomic"
-//             kunmap_atomic((void *)(guest_buf & PAGE_MASK));
+               vunmap_pmem((void *)(guest_buf & PAGE_MASK), PAGE_SIZE);
        }
        return req_size - size;
 }
@@ -331,7 +329,7 @@ int litevm_write_guest(struct litevm_vcpu *vcpu,
                if (is_error_hpa(paddr))
                        break;
 
-               guest_buf = 0; //(hva_t)kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT));
+               guest_buf = (hva_t)vmap_pmem(ppn2page(paddr >> PAGE_SHIFT), PAGE_SIZE);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = MIN(size, PAGE_SIZE - offset);
@@ -339,7 +337,7 @@ int litevm_write_guest(struct litevm_vcpu *vcpu,
                host_buf += now;
                addr += now;
                size -= now;
-               //kunmap_atomic((void *)(guest_buf & PAGE_MASK));
+               vunmap_pmem((void *)(guest_buf & PAGE_MASK), PAGE_SIZE);
        }
        return req_size - size;
 }
@@ -787,19 +785,19 @@ static int init_rmode_tss(struct litevm* litevm)
                return 0;
        }
 
-       page = kmap_atomic(p1);
+       page = vmap_pmem(p1, PAGE_SIZE);
        memset(page, 0, PAGE_SIZE);
        *(uint16_t*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
-       kunmap_atomic(page);
+       vunmap_pmem(page, PAGE_SIZE);
 
-       page = kmap_atomic(p2);
+       page = vmap_pmem(p2, PAGE_SIZE);
        memset(page, 0, PAGE_SIZE);
-       kunmap_atomic(page);
+       vunmap_pmem(page, PAGE_SIZE);
 
-       page = kmap_atomic(p3);
+       page = vmap_pmem(p3, PAGE_SIZE);
        memset(page, 0, PAGE_SIZE);
        *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
-       kunmap_atomic(page);
+       vunmap_pmem(page, PAGE_SIZE);
 
        return 1;
 }
@@ -892,7 +890,7 @@ static int pdptrs_have_reserved_bits_set(struct litevm_vcpu *vcpu,
        spin_lock(&vcpu->litevm->lock);
        memslot = gfn_to_memslot(vcpu->litevm, pdpt_gfn);
        /* FIXME: !memslot - emulate? 0xff? */
-       pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn));
+       pdpt = vmap_atomic(gfn_to_pmem(memslot, pdpt_gfn), PAGE_SIZE);
 
        for (i = 0; i < 4; ++i) {
                pdpte = pdpt[offset + i];
@@ -900,7 +898,7 @@ static int pdptrs_have_reserved_bits_set(struct litevm_vcpu *vcpu,
                        break;
        }
 
-       kunmap_atomic(pdpt);
+       vunmap_pmem(pdpt, PAGE_SIZE);
        spin_unlock(&vcpu->litevm->lock);
 
        return i != 4;
@@ -1696,11 +1694,11 @@ static int emulator_read_std(unsigned long addr,
                memslot = gfn_to_memslot(vcpu->litevm, pfn);
                if (!memslot)
                        return X86EMUL_UNHANDLEABLE;
-               page = kmap_atomic(gfn_to_page(memslot, pfn));
+               page = vmap_atomic(gfn_to_pmem(memslot, pfn), PAGE_SIZE);
 
                memcpy(data, page + offset, tocopy);
 
-               kunmap_atomic(page);
+               vunmap_pmem(page, PAGE_SIZE);
 
                bytes -= tocopy;
                data += tocopy;
diff --git a/kern/arch/x86/vm_mmu.c b/kern/arch/x86/vm_mmu.c
index 85b2afd..e28c0cb 100644
@@ -175,8 +175,7 @@ static void litevm_mmu_free_page(struct litevm_vcpu *vcpu, hpa_t page_hpa)
 {
        struct litevm_mmu_page *page_head = page_header(page_hpa);
 
-       //SLIST_REMOVE(&vcpu->free_pages, page_head, 
-       //           litevm_mmu_page, link);
+       LIST_REMOVE(page_head, link);
        //list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
        //list_add(&page_head->link, &vcpu->free_pages);
@@ -654,10 +653,11 @@ int litevm_mmu_reset_context(struct litevm_vcpu *vcpu)
 
 static void free_mmu_pages(struct litevm_vcpu *vcpu)
 {
+       /* todo: use the right macros */
        while (!LIST_EMPTY(&vcpu->link)) {
                struct litevm_mmu_page *page;
 
-               page = LIST_NEXT(&vcpu->link, link);
+               page = LIST_FIRST(&vcpu->link);
                LIST_REMOVE(page, link);
                __free_page(ppn2page(page->page_hpa >> PAGE_SHIFT));
                page->page_hpa = INVALID_PAGE;
@@ -680,7 +680,7 @@ static int alloc_mmu_pages(struct litevm_vcpu *vcpu)
                INIT_LIST_HEAD(&page_header->link);
                if ((page = kpage_alloc_addr()) == NULL)
                        goto error_1;
-               page->pg_private = (uintptr_t)page_header;
+               page->pg_private = page_header;
                page_header->page_hpa = (hpa_t)page2ppn(page) << PAGE_SHIFT;
                memset(KADDR(page_header->page_hpa), 0, PAGE_SIZE);
                LIST_INSERT_HEAD(&vcpu->link, page_header, link);
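The vm_mmu.c hunks above move the vcpu's free-page bookkeeping onto the BSD <sys/queue.h> LIST macros (LIST_INSERT_HEAD(), LIST_FIRST(), LIST_REMOVE(), LIST_EMPTY()). A minimal standalone sketch of that pattern, with made-up type and field names standing in for struct litevm_mmu_page and the vcpu's list head:

        #include <stdlib.h>
        #include <sys/queue.h>

        struct mmu_page {
                unsigned long page_hpa;
                LIST_ENTRY(mmu_page) link;      /* the 'link' field used above */
        };
        LIST_HEAD(mmu_page_list, mmu_page);

        /* Same shape as alloc_mmu_pages(): push each new page onto the head. */
        static void add_one(struct mmu_page_list *head, unsigned long hpa)
        {
                struct mmu_page *page = malloc(sizeof(*page));

                page->page_hpa = hpa;
                LIST_INSERT_HEAD(head, page, link);
        }

        /* Same shape as free_mmu_pages(): pop the first element until empty. */
        static void free_all(struct mmu_page_list *head)
        {
                while (!LIST_EMPTY(head)) {
                        struct mmu_page *page = LIST_FIRST(head);

                        LIST_REMOVE(page, link);
                        free(page);
                }
        }

An empty list is set up with LIST_INIT(&head) (or by zeroing the head) before the first insert.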