PCI device locking and synchronization
diff --git a/kern/arch/x86/vmx_mmu.c b/kern/arch/x86/vmx_mmu.c
index a85082e..b65fc12 100644
--- a/kern/arch/x86/vmx_mmu.c
+++ b/kern/arch/x86/vmx_mmu.c
@@ -155,22 +155,22 @@ static int is_cpuid_PSE36(void)
 
 static int is_present_pte(unsigned long pte)
 {
-       print_func_entry();
-       print_func_exit();
+       //print_func_entry();
+       //print_func_exit();
        return pte & PT_PRESENT_MASK;
 }
 
 static int is_writeble_pte(unsigned long pte)
 {
-       print_func_entry();
-       print_func_exit();
+       //print_func_entry();
+       //print_func_exit();
        return pte & PT_WRITABLE_MASK;
 }
 
 static int is_io_pte(unsigned long pte)
 {
-       print_func_entry();
-       print_func_exit();
+       //print_func_entry();
+       //print_func_exit();
        return pte & PT_SHADOW_IO_MARK;
 }
 
@@ -251,12 +251,17 @@ hpa_t gpa_to_hpa(struct litevm_vcpu * vcpu, gpa_t gpa)
 
        ASSERT((gpa & HPA_ERR_MASK) == 0);
        slot = gfn_to_memslot(vcpu->litevm, gpa >> PAGE_SHIFT);
+       printk("GFN %016lx memslot %p\n", gpa>>PAGE_SHIFT, slot);
        if (!slot) {
+               printk("GFN_TO_MEMSLOT FAILED!\n");
                print_func_exit();
                return gpa | HPA_ERR_MASK;
        }
        page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
+       printk("Page is %p\n", page);
        print_func_exit();
+       printk("gpa_to_hpa: return %016lx\n",  ((hpa_t) page2ppn(page) << PAGE_SHIFT)
+               | (gpa & (PAGE_SIZE - 1)));
        return ((hpa_t) page2ppn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE - 1));
 }
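
A runnable sketch of the translation whose result the new printk logs, with a toy slot table standing in for gfn_to_memslot()/gfn_to_page()/page2ppn(): the guest frame number picks a host frame, and the low offset bits pass through unchanged. The HPA_ERR_MASK value here is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef uint64_t hpa_t;

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define HPA_ERR_MASK ((hpa_t)1 << 63)   /* assumed: high-bit error marker */

/* toy stand-in for the memslot lookup: gfn -> host page frame number */
static const uint64_t host_ppn[4] = { 0x1234, 0x1235, 0x2000, 0x2001 };

static hpa_t gpa_to_hpa(gpa_t gpa)
{
	uint64_t gfn = gpa >> PAGE_SHIFT;

	if (gfn >= 4)                   /* no slot covers this gfn */
		return gpa | HPA_ERR_MASK;
	return ((hpa_t)host_ppn[gfn] << PAGE_SHIFT) | (gpa & (PAGE_SIZE - 1));
}

int main(void)
{
	/* gfn 1 -> host frame 0x1235, offset 0xabc carried over */
	printf("hpa = %016lx\n", (unsigned long)gpa_to_hpa(0x1abc));
	return 0;
}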
@@ -314,6 +319,8 @@ static int nonpaging_map(struct litevm_vcpu *vcpu, gva_t v, hpa_t p)
        print_func_entry();
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;
+printk("nonpaging_map: v %016lx, p %016lx\n", v, p);
+hexdump(KADDR(p), 32);
 
        for (;; level--) {
                uint32_t index = PT64_INDEX(v, level);
@@ -332,8 +339,7 @@ static int nonpaging_map(struct litevm_vcpu *vcpu, gva_t v, hpa_t p)
                }
 
                if (table[index] == 0) {
-                       hpa_t new_table = litevm_mmu_alloc_page(vcpu,
-                                                                                                       &table[index]);
+                       hpa_t new_table = litevm_mmu_alloc_page(vcpu, &table[index]);
 
                        if (!VALID_PAGE(new_table)) {
                                pgprintk("nonpaging_map: ENOMEM\n");
@@ -384,6 +390,7 @@ static int nonpaging_page_fault(struct litevm_vcpu *vcpu, gva_t gva,
        int ret;
        gpa_t addr = gva;
 
+printk("nonpaging_page_fault: %016llx\n", gva);
        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
@@ -529,7 +536,7 @@ static void inject_page_fault(struct litevm_vcpu *vcpu,
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                                         DF_VECTOR |
                                         INTR_TYPE_EXCEPTION |
-                                        INTR_INFO_DELIEVER_CODE_MASK | INTR_INFO_VALID_MASK);
+                                        INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK);
                print_func_exit();
                return;
        }
@@ -538,7 +545,7 @@ static void inject_page_fault(struct litevm_vcpu *vcpu,
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                                 PF_VECTOR |
                                 INTR_TYPE_EXCEPTION |
-                                INTR_INFO_DELIEVER_CODE_MASK | INTR_INFO_VALID_MASK);
+                                INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK);
 
        print_func_exit();
 }
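
Both hunks correct the spelling of INTR_INFO_DELIVER_CODE_MASK. For reference, a sketch of how the VM-entry interruption-information word is assembled per the Intel SDM: vector in bits 7:0, type in bits 10:8 (3 = hardware exception), deliver-error-code in bit 11, valid in bit 31:

#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14                        /* #PF, takes an error code */
#define DF_VECTOR 8                         /* #DF, takes an error code */
#define INTR_TYPE_EXCEPTION         (3u << 8)
#define INTR_INFO_DELIVER_CODE_MASK (1u << 11)
#define INTR_INFO_VALID_MASK        (1u << 31)

int main(void)
{
	uint32_t info = PF_VECTOR | INTR_TYPE_EXCEPTION |
			INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK;

	printf("VM_ENTRY_INTR_INFO_FIELD = %08x\n", info);  /* 80000b0e */
	return 0;
}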
@@ -587,6 +594,7 @@ static void paging_inval_page(struct litevm_vcpu *vcpu, gva_t addr)
        hpa_t page_addr = vcpu->mmu.root_hpa;
        int level = vcpu->mmu.shadow_root_level;
 
+printk("paging_inval_page: addr %016lx\n", addr);
        ++litevm_stat.invlpg;
 
        for (;; level--) {
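
paging_inval_page descends the shadow table the same way and clears the leaf PTE that maps addr, so the next guest access refaults and the shadow entry is rebuilt. A toy two-level sketch of that clear, under the same 9-bits-per-level assumption as above:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define LEVEL_BITS 9

static uint64_t l2[512], l1[512];   /* toy two-level shadow table */

static uint32_t pt_index(uint64_t addr, int level)
{
	return (addr >> (PAGE_SHIFT + (level - 1) * LEVEL_BITS)) & 0x1ff;
}

int main(void)
{
	uint64_t addr = 0x203000;

	l2[pt_index(addr, 2)] = 1;             /* pretend: points at l1 */
	l1[pt_index(addr, 1)] = 0xabcd000 | 1; /* leaf maps a host frame */
	l1[pt_index(addr, 1)] = 0;             /* invlpg: drop the mapping */
	printf("leaf pte after invlpg: %016lx\n",
	       (unsigned long)l1[pt_index(addr, 1)]);
	return 0;
}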
@@ -762,10 +770,10 @@ static int alloc_mmu_pages(struct litevm_vcpu *vcpu)
                struct page *page;
                struct litevm_mmu_page *page_header = &vcpu->page_header_buf[i];
 
-               if ((page = kpage_alloc_addr()) == NULL)
+               if (kpage_alloc(&page) != ESUCCESS)
                        goto error_1;
                page->pg_private = page_header;
-               page_header->page_hpa = (hpa_t) page2ppn(page) << PAGE_SHIFT;
+               page_header->page_hpa = (hpa_t) page2pa(page);
                memset(KADDR(page_header->page_hpa), 0, PAGE_SIZE);
                LIST_INSERT_HEAD(&vcpu->link, page_header, link);
        }
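
The hunk adopts kpage_alloc()'s error-code/out-parameter convention in place of the NULL-returning kpage_alloc_addr(), and uses page2pa() instead of open-coding page2ppn(page) << PAGE_SHIFT (the two are equivalent). A sketch of that convention with hypothetical stand-ins:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define ESUCCESS 0
#define PAGE_SHIFT 12

struct page { uint64_t ppn; };

/* hypothetical allocator with the error-code/out-parameter convention */
static int kpage_alloc(struct page **pp)
{
	struct page *p = malloc(sizeof(*p));

	if (!p)
		return -1;          /* allocation failure */
	p->ppn = 0x1234;            /* toy physical page number */
	*pp = p;
	return ESUCCESS;
}

static uint64_t page2pa(struct page *p)
{
	return p->ppn << PAGE_SHIFT;   /* same as page2ppn(p) << PAGE_SHIFT */
}

int main(void)
{
	struct page *page;

	if (kpage_alloc(&page) != ESUCCESS)
		return 1;
	printf("page_hpa = %016lx\n", (unsigned long)page2pa(page));
	free(page);
	return 0;
}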