/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */
#include <sys/queue.h>
#include <string.h>
#include <stdio.h>
#include <pmap.h>
#include <error.h>
#include <arch/types.h>
#include <arch/vm.h>
#include <arch/emulate.h>
#include <arch/vmdebug.h>
#include <arch/msr-index.h>
#define pgprintk(x...) do { } while (0)
#define ASSERT(x)                                                  \
    if (!(x)) {                                                    \
        printd("assertion failed %s:%d: %s\n",                     \
               __FILE__, __LINE__, #x);                            \
    }
#define PT64_ENT_PER_PAGE 512
#define PT32_ENT_PER_PAGE 1024

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
    (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
#define PT32_PTE_COPY_MASK \
    (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
     PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
     PT_GLOBAL_MASK)

#define PT32_NON_PTE_COPY_MASK \
    (PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
     PT_ACCESSED_MASK | PT_DIRTY_MASK)

#define PT64_PTE_COPY_MASK \
    (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT64_NON_PTE_COPY_MASK \
    (PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
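
/*
 * Hardware ignores pte bits 9-11, so the shadow MMU claims them: bit 9
 * marks a shadow-private page (a large-page shadow or an emulated-I/O
 * pte, PT_SHADOW_PS_MARK/PT_SHADOW_IO_MARK above), and bits 10-11 cache
 * the guest's writable and user bits.  PT_SHADOW_BITS_OFFSET is sized so
 * that shifting a guest access mask left by it moves PT_WRITABLE_MASK
 * and PT_USER_MASK into those shadow slots (see set_pte_common()).
 */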
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
    (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
    (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
    (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
    (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
    (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
    (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
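
/*
 * Worked example with 4KB pages (PAGE_SHIFT == 12): a 64-bit walk
 * consumes 9 address bits per level, so
 *
 *   PT64_INDEX(addr, 1) = addr bits 12-20  (page table)
 *   PT64_INDEX(addr, 2) = addr bits 21-29  (page directory)
 *   PT64_INDEX(addr, 3) = addr bits 30-38  (PDPT)
 *   PT64_INDEX(addr, 4) = addr bits 39-47  (PML4)
 *
 * The 32-bit macros do the same with 10 bits per level over two levels.
 */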
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
    (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
    (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
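
/*
 * Levels count up from the leaves: 1 is the page table, 2 the page
 * directory, and the root sits 2, 3, or 4 levels up depending on whether
 * the guest uses classic 32-bit, PAE, or long-mode paging.  The walkers
 * below all loop "for (;; level--)" from the root level down to 1.
 */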
static int is_write_protection(void)
{
    return guest_cr0() & CR0_WP_MASK;
}
static int is_cpuid_PSE36(void)
{
    /* assume the cpu reports PSE36 support */
    return 1;
}
static int is_present_pte(unsigned long pte)
{
    //print_func_entry();
    //print_func_exit();
    return pte & PT_PRESENT_MASK;
}
static int is_writeble_pte(unsigned long pte)
{
    //print_func_entry();
    //print_func_exit();
    return pte & PT_WRITABLE_MASK;
}
static int is_io_pte(unsigned long pte)
{
    //print_func_entry();
    //print_func_exit();
    return pte & PT_SHADOW_IO_MARK;
}
static void litevm_mmu_free_page(struct litevm_vcpu *vcpu, hpa_t page_hpa)
{
    struct litevm_mmu_page *page_head = page_header(page_hpa);

    LIST_REMOVE(page_head, link);
    //list_del(&page_head->link);
    page_head->page_hpa = page_hpa;
    /* return the page to the vcpu's free list */
    //list_add(&page_head->link, &vcpu->free_pages);
    LIST_INSERT_HEAD(&vcpu->link, page_head, link);
}
static int is_empty_shadow_page(hpa_t page_hpa)
{
    uint32_t *pos;
    uint32_t *end;

    for (pos = KADDR(page_hpa), end = pos + PAGE_SIZE / sizeof(uint32_t);
         pos != end; pos++)
        if (*pos != 0)
            return 0;
    return 1;
}
static hpa_t litevm_mmu_alloc_page(struct litevm_vcpu *vcpu,
                                   uint64_t *parent_pte)
{
    struct litevm_mmu_page *page;

    if (LIST_EMPTY(&vcpu->link))
        return INVALID_PAGE;

    page = LIST_FIRST(&vcpu->link);
    LIST_REMOVE(page, link);
    LIST_INSERT_HEAD(&vcpu->litevm->link, page, link);
    ASSERT(is_empty_shadow_page(page->page_hpa));
    page->slot_bitmap = 0;
    page->global = 1;
    page->parent_pte = parent_pte;
    return page->page_hpa;
}
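
/*
 * Callers pass the address of the parent entry that will point at the
 * new page, so the shadow tree can later be unlinked by zeroing
 * *parent_pte (as litevm_mmu_flush_tlb() does).  Typical use, as in
 * nonpaging_map() below:
 *
 *   hpa_t new_table = litevm_mmu_alloc_page(vcpu, &table[index]);
 *   if (!VALID_PAGE(new_table))
 *       ...the fixed per-vcpu pool is exhausted; flush to reclaim...
 */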
static void page_header_update_slot(struct litevm *litevm, void *pte, gpa_t gpa)
{
    int slot = memslot_id(litevm, gfn_to_memslot(litevm, gpa >> PAGE_SHIFT));
    struct litevm_mmu_page *page_head = page_header(PADDR(pte));

    SET_BITMASK_BIT_ATOMIC((uint8_t *)&page_head->slot_bitmap, slot);
}
hpa_t safe_gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa)
{
    hpa_t hpa = gpa_to_hpa(vcpu, gpa);

    return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}
hpa_t gpa_to_hpa(struct litevm_vcpu *vcpu, gpa_t gpa)
{
    struct litevm_memory_slot *slot;
    struct page *page;

    ASSERT((gpa & HPA_ERR_MASK) == 0);
    slot = gfn_to_memslot(vcpu->litevm, gpa >> PAGE_SHIFT);
    printk("GFN %016lx memslot %p\n", gpa >> PAGE_SHIFT, slot);
    if (!slot) {
        printk("GFN_TO_MEMSLOT FAILED!\n");
        return gpa | HPA_ERR_MASK;
    }
    page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
    printk("Page is %p\n", page);
    printk("gpa_to_hpa: return %016lx\n",
           ((hpa_t) page2ppn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE - 1)));
    return ((hpa_t) page2ppn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE - 1));
}
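
/*
 * Example of the address math above: if gpa 0x12345678 is backed by
 * physical page number 0x9abc, then page2ppn() << PAGE_SHIFT yields
 * 0x9abc000, and OR-ing back the low 12 offset bits gives hpa 0x9abc678.
 */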
hpa_t gva_to_hpa(struct litevm_vcpu *vcpu, gva_t gva)
{
    gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

    if (gpa == UNMAPPED_GVA) {
        return UNMAPPED_GVA;
    }
    return gpa_to_hpa(vcpu, gpa);
}
static void release_pt_page_64(struct litevm_vcpu *vcpu, hpa_t page_hpa,
                               int level)
{
    ASSERT(vcpu);
    ASSERT(VALID_PAGE(page_hpa));
    ASSERT(level <= PT64_ROOT_LEVEL && level > 0);

    if (level == 1)
        memset(KADDR(page_hpa), 0, PAGE_SIZE);
    else {
        uint64_t *pos;
        uint64_t *end;

        for (pos = KADDR(page_hpa), end = pos + PT64_ENT_PER_PAGE;
             pos != end; pos++) {
            uint64_t current_ent = *pos;

            *pos = 0;
            if (is_present_pte(current_ent))
                release_pt_page_64(vcpu,
                                   current_ent &
                                   PT64_BASE_ADDR_MASK, level - 1);
        }
    }
    litevm_mmu_free_page(vcpu, page_hpa);
}
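
/*
 * This keeps the free-list invariant that litevm_mmu_alloc_page()
 * asserts: leaf pages are memset to zero and every interior entry is
 * cleared (*pos = 0) before its subtree is released, so any page
 * reaching the free list passes is_empty_shadow_page().
 */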
static void nonpaging_new_cr3(struct litevm_vcpu *vcpu)
{
}
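
/*
 * With guest paging disabled, gva == gpa; nonpaging_map() builds the
 * shadow walk for that identity view on demand, allocating missing
 * interior tables and installing the leaf with full rw/user access,
 * since a non-paging guest expects no page-level protection.
 */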
static int nonpaging_map(struct litevm_vcpu *vcpu, gva_t v, hpa_t p)
{
    int level = PT32E_ROOT_LEVEL;
    hpa_t table_addr = vcpu->mmu.root_hpa;

    printk("nonpaging_map: v %016lx, p %016lx\n", v, p);
    hexdump(KADDR(p), 32);
    for (;; level--) {
        uint32_t index = PT64_INDEX(v, level);
        uint64_t *table;

        ASSERT(VALID_PAGE(table_addr));
        table = KADDR(table_addr);

        if (level == 1) {
            mark_page_dirty(vcpu->litevm, v >> PAGE_SHIFT);
            page_header_update_slot(vcpu->litevm, table, v);
            table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                           PT_USER_MASK;
            return 0;
        }

        if (table[index] == 0) {
            hpa_t new_table = litevm_mmu_alloc_page(vcpu, &table[index]);

            if (!VALID_PAGE(new_table)) {
                pgprintk("nonpaging_map: ENOMEM\n");
                return -ENOMEM;
            }

            if (level == PT32E_ROOT_LEVEL)
                table[index] = new_table | PT_PRESENT_MASK;
            else
                table[index] = new_table | PT_PRESENT_MASK |
                               PT_WRITABLE_MASK | PT_USER_MASK;
        }
        table_addr = table[index] & PT64_BASE_ADDR_MASK;
    }
}
static void nonpaging_flush(struct litevm_vcpu *vcpu)
{
    hpa_t root = vcpu->mmu.root_hpa;

    ++litevm_stat.tlb_flush;
    pgprintk("nonpaging_flush\n");
    ASSERT(VALID_PAGE(root));
    release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
    root = litevm_mmu_alloc_page(vcpu, 0);
    ASSERT(VALID_PAGE(root));
    vcpu->mmu.root_hpa = root;
    if (is_paging())
        root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
    vmcs_writel(GUEST_CR3, root);
}
static gpa_t nonpaging_gva_to_gpa(struct litevm_vcpu *vcpu, gva_t vaddr)
{
    return vaddr;
}
static int nonpaging_page_fault(struct litevm_vcpu *vcpu, gva_t gva,
                                uint32_t error_code)
{
    int ret;
    gpa_t addr = gva;

    printk("nonpaging_page_fault: %016llx\n", gva);
    ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
    for (;;) {
        hpa_t paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

        if (is_error_hpa(paddr)) {
            return 1;
        }

        ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
        if (ret) {
            nonpaging_flush(vcpu);
            continue;
        }
        break;
    }
    return ret;
}
static void nonpaging_inval_page(struct litevm_vcpu *vcpu, gva_t addr)
{
}
static void nonpaging_free(struct litevm_vcpu *vcpu)
{
    hpa_t root;

    ASSERT(vcpu);
    root = vcpu->mmu.root_hpa;
    if (VALID_PAGE(root))
        release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
    vcpu->mmu.root_hpa = INVALID_PAGE;
}
static int nonpaging_init_context(struct litevm_vcpu *vcpu)
{
    struct litevm_mmu *context = &vcpu->mmu;

    context->new_cr3 = nonpaging_new_cr3;
    context->page_fault = nonpaging_page_fault;
    context->inval_page = nonpaging_inval_page;
    context->gva_to_gpa = nonpaging_gva_to_gpa;
    context->free = nonpaging_free;
    context->root_level = PT32E_ROOT_LEVEL;
    context->shadow_root_level = PT32E_ROOT_LEVEL;
    context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
    ASSERT(VALID_PAGE(context->root_hpa));
    vmcs_writel(GUEST_CR3, context->root_hpa);
    return 0;
}
static void litevm_mmu_flush_tlb(struct litevm_vcpu *vcpu)
{
    struct litevm_mmu_page *page, *npage;

    //list_for_each_entry_safe(page, npage, &vcpu->litevm->active_mmu_pages,
    LIST_FOREACH_SAFE(page, &vcpu->litevm->link, link, npage) {
        if (page->global)
            continue;

        if (!page->parent_pte)
            continue;

        *page->parent_pte = 0;
        release_pt_page_64(vcpu, page->page_hpa, 1);
    }
    ++litevm_stat.tlb_flush;
}
static void paging_new_cr3(struct litevm_vcpu *vcpu)
{
    litevm_mmu_flush_tlb(vcpu);
}
static void mark_pagetable_nonglobal(void *shadow_pte)
{
    page_header(PADDR(shadow_pte))->global = 0;
}
static inline void set_pte_common(struct litevm_vcpu *vcpu,
                                  uint64_t *shadow_pte,
                                  gpa_t gaddr, int dirty, uint64_t access_bits)
{
    hpa_t paddr;

    *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
    if (!dirty)
        access_bits &= ~PT_WRITABLE_MASK;

    if (access_bits & PT_WRITABLE_MASK)
        mark_page_dirty(vcpu->litevm, gaddr >> PAGE_SHIFT);

    *shadow_pte |= access_bits;

    paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

    if (!(*shadow_pte & PT_GLOBAL_MASK))
        mark_pagetable_nonglobal(shadow_pte);

    if (is_error_hpa(paddr)) {
        *shadow_pte |= gaddr;
        *shadow_pte |= PT_SHADOW_IO_MARK;
        *shadow_pte &= ~PT_PRESENT_MASK;
    } else {
        *shadow_pte |= paddr;
        page_header_update_slot(vcpu->litevm, shadow_pte, gaddr);
    }
}
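
/*
 * The error-hpa branch above is what is_io_pte() keys off: a gpa with no
 * memslot behind it is stashed in the (non-present) shadow pte together
 * with PT_SHADOW_IO_MARK, so the fault path can recover the address and
 * treat the access as emulated I/O rather than missing memory.
 */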
static void inject_page_fault(struct litevm_vcpu *vcpu,
                              uint64_t addr, uint32_t err_code)
{
    uint32_t vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

    pgprintk("inject_page_fault: 0x%llx err 0x%x\n", addr, err_code);

    ++litevm_stat.pf_guest;

    if (is_page_fault(vect_info)) {
        printd("inject_page_fault: "
               "double fault 0x%llx @ 0x%lx\n", addr, vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     DF_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK);
        return;
    }
    vcpu->cr2 = addr;
    vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
    vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                 PF_VECTOR |
                 INTR_TYPE_EXCEPTION |
                 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK);
}
static inline int fix_read_pf(uint64_t *shadow_ent)
{
    if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
        !(*shadow_ent & PT_USER_MASK)) {
        /*
         * If supervisor write protect is disabled, we shadow kernel
         * pages as user pages so we can trap the write access.
         */
        *shadow_ent |= PT_USER_MASK;
        *shadow_ent &= ~PT_WRITABLE_MASK;

        return 1;
    }
    return 0;
}
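
/*
 * Example, assuming the guest runs with CR0.WP clear: a kernel page is
 * shadowed with PT_SHADOW_USER_MASK set but PT_USER_MASK clear.  The
 * first read fault lands here, which grants PT_USER_MASK (and strips
 * writability) so reads proceed, while a subsequent write still faults
 * and can be handled separately.
 */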
static int may_access(uint64_t pte, int write, int user)
{
    if (user && !(pte & PT_USER_MASK)) {
        return 0;
    }
    if (write && !(pte & PT_WRITABLE_MASK)) {
        return 0;
    }
    return 1;
}
/*
 * Remove a shadow pte.
 */
static void paging_inval_page(struct litevm_vcpu *vcpu, gva_t addr)
{
    hpa_t page_addr = vcpu->mmu.root_hpa;
    int level = vcpu->mmu.shadow_root_level;

    printk("paging_inval_page: addr %016lx\n", addr);
    ++litevm_stat.invlpg;

    for (;; level--) {
        uint32_t index = PT64_INDEX(addr, level);
        uint64_t *table = KADDR(page_addr);

        if (level == PT_PAGE_TABLE_LEVEL) {
            table[index] = 0;
            return;
        }

        if (!is_present_pte(table[index])) {
            return;
        }

        page_addr = table[index] & PT64_BASE_ADDR_MASK;

        if (level == PT_DIRECTORY_LEVEL && (table[index] & PT_SHADOW_PS_MARK)) {
            table[index] = 0;
            release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);

            vmcs_writel(GUEST_CR3, vcpu->mmu.root_hpa |
                        (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
            return;
        }
    }
}
static void paging_free(struct litevm_vcpu *vcpu)
{
    nonpaging_free(vcpu);
}
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
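
/*
 * paging_tmpl.h is a preprocessor template: it is included twice, with
 * PTTYPE selecting the 64-bit or 32-bit guest pte layout, and it pastes
 * the width into the names it defines.  Roughly (a sketch of the idiom;
 * the real macros live in paging_tmpl.h):
 *
 *   #if PTTYPE == 64
 *   #define FNAME(name) paging64_##name
 *   #else
 *   #define FNAME(name) paging32_##name
 *   #endif
 *
 * which is where the paging64_* and paging32_* entry points used below
 * come from.
 */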
static int paging64_init_context(struct litevm_vcpu *vcpu)
{
    struct litevm_mmu *context = &vcpu->mmu;

    ASSERT(is_pae());
    context->new_cr3 = paging_new_cr3;
    context->page_fault = paging64_page_fault;
    context->inval_page = paging_inval_page;
    context->gva_to_gpa = paging64_gva_to_gpa;
    context->free = paging_free;
    context->root_level = PT64_ROOT_LEVEL;
    context->shadow_root_level = PT64_ROOT_LEVEL;
    context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
    ASSERT(VALID_PAGE(context->root_hpa));
    vmcs_writel(GUEST_CR3, context->root_hpa |
                (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
    return 0;
}
static int paging32_init_context(struct litevm_vcpu *vcpu)
{
    struct litevm_mmu *context = &vcpu->mmu;

    context->new_cr3 = paging_new_cr3;
    context->page_fault = paging32_page_fault;
    context->inval_page = paging_inval_page;
    context->gva_to_gpa = paging32_gva_to_gpa;
    context->free = paging_free;
    context->root_level = PT32_ROOT_LEVEL;
    context->shadow_root_level = PT32E_ROOT_LEVEL;
    context->root_hpa = litevm_mmu_alloc_page(vcpu, 0);
    ASSERT(VALID_PAGE(context->root_hpa));
    vmcs_writel(GUEST_CR3, context->root_hpa |
                (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
    return 0;
}
static int paging32E_init_context(struct litevm_vcpu *vcpu)
{
    int ret;

    if ((ret = paging64_init_context(vcpu))) {
        return ret;
    }

    vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
    vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
    return 0;
}
static int init_litevm_mmu(struct litevm_vcpu *vcpu)
{
    ASSERT(vcpu);
    ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

    if (!is_paging()) {
        return nonpaging_init_context(vcpu);
    } else if (is_long_mode()) {
        return paging64_init_context(vcpu);
    } else if (is_pae()) {
        return paging32E_init_context(vcpu);
    } else {
        return paging32_init_context(vcpu);
    }
}
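
/*
 * Dispatch summary: CR0.PG clear -> nonpaging (still backed by a PAE
 * shadow tree); long mode -> 4-level; CR4.PAE -> PAE; otherwise classic
 * 2-level 32-bit guest paging, also shadowed at PT32E_ROOT_LEVEL (see
 * paging32_init_context() setting shadow_root_level above).
 */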
static void destroy_litevm_mmu(struct litevm_vcpu *vcpu)
{
    ASSERT(vcpu);
    if (VALID_PAGE(vcpu->mmu.root_hpa)) {
        vcpu->mmu.free(vcpu);
        vcpu->mmu.root_hpa = INVALID_PAGE;
    }
}
int litevm_mmu_reset_context(struct litevm_vcpu *vcpu)
{
    destroy_litevm_mmu(vcpu);
    return init_litevm_mmu(vcpu);
}
static void free_mmu_pages(struct litevm_vcpu *vcpu)
{
    /* todo: use the right macros */
    while (!LIST_EMPTY(&vcpu->link)) {
        struct litevm_mmu_page *vmpage;

        vmpage = LIST_FIRST(&vcpu->link);
        LIST_REMOVE(vmpage, link);
        uintptr_t ppn = vmpage->page_hpa >> PAGE_SHIFT;

        page_decref(ppn2page(ppn));
        assert(page_is_free(ppn));
        vmpage->page_hpa = INVALID_PAGE;
    }
}
static int alloc_mmu_pages(struct litevm_vcpu *vcpu)
{
    int i;

    ASSERT(vcpu);

    /* we could try to do the contiguous alloc but it's not
     * necessary for them to be contiguous.
     */
    for (i = 0; i < LITEVM_NUM_MMU_PAGES; i++) {
        struct page *page;
        struct litevm_mmu_page *page_header = &vcpu->page_header_buf[i];

        if (kpage_alloc(&page) != ESUCCESS)
            goto error_1;
        page->pg_private = page_header;
        page_header->page_hpa = (hpa_t) page2pa(page);
        memset(KADDR(page_header->page_hpa), 0, PAGE_SIZE);
        LIST_INSERT_HEAD(&vcpu->link, page_header, link);
    }
    return 0;

error_1:
    free_mmu_pages(vcpu);
    return -ENOMEM;
}
int litevm_mmu_init(struct litevm_vcpu *vcpu)
{
    int r;

    ASSERT(vcpu);
    ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
    ASSERT(LIST_EMPTY(&vcpu->link));

    if ((r = alloc_mmu_pages(vcpu))) {
        return r;
    }

    if ((r = init_litevm_mmu(vcpu))) {
        free_mmu_pages(vcpu);
        return r;
    }
    return 0;
}
void litevm_mmu_destroy(struct litevm_vcpu *vcpu)
{
    ASSERT(vcpu);

    destroy_litevm_mmu(vcpu);
    free_mmu_pages(vcpu);
}
void litevm_mmu_slot_remove_write_access(struct litevm *litevm, int slot)
{
    struct litevm_mmu_page *page;

    LIST_FOREACH(page, &litevm->link, link) {
        uint64_t *pt;
        int i;

        if (!GET_BITMASK_BIT((uint8_t *)&page->slot_bitmap, slot))
            continue;

        pt = KADDR(page->page_hpa);
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
            /* avoid RMW */
            if (pt[i] & PT_WRITABLE_MASK)
                pt[i] &= ~PT_WRITABLE_MASK;
    }
}
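
/*
 * Stripping PT_WRITABLE_MASK from every shadow pte that maps the slot
 * forces the next guest write to fault, which is what allows the caller
 * to begin tracking dirty pages for that memslot.
 */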