VMX sort of worked.
authorRonald G. Minnich <rminnich@google.com>
Fri, 7 Feb 2014 21:47:03 +0000 (13:47 -0800)
committerRonald G. Minnich <rminnich@google.com>
Fri, 7 Feb 2014 21:47:03 +0000 (13:47 -0800)
This actually performs a VM entry and catches the resulting VM exit, with
no GPFs or bad faults. A guest appears to have started, but further testing is needed to confirm.

Signed-off-by: Ronald G. Minnich <rminnich@google.com>
kern/arch/x86/vmx.c
kern/arch/x86/vmx_mmu.c
kern/drivers/dev/vm.c
kern/kfs/runvm

index 86ef266..8501b9d 100644 (file)
@@ -39,7 +39,9 @@
 #define QLOCK_init(x) {printk("qlock_init %p\n", x); qlock_init(x); printk("%p lock_inited\n", x);}
 #define QLOCK(x) {printk("qlock %p\n", x); qlock(x); printk("%p locked\n", x);}
 #define QUNLOCK(x) {printk("qunlock %p\n", x); qunlock(x); printk("%p unlocked\n", x);}
-
+#define SPLI_irqsave(x){printk("spin_lock_init %p:", x); spinlock_init(x); printk("inited\n");}
+#define SPLL(x){printk("spin_lock %p\n", x); spin_lock_irqsave(x); printk("%p locked\n", x);}
+#define SPLU(x){printk("spin_unlock %p\n", x); spin_unlock(x); printk("%p unlocked\n", x);}
 struct litevm_stat litevm_stat;
 
 static struct litevm_stats_debugfs_item {
@@ -68,6 +70,13 @@ static const uint32_t vmx_msr_index[] = {
        MSR_EFER,       // wtf? MSR_K6_STAR,
 };
 
+static const char* vmx_msr_name[] = {
+#ifdef __x86_64__
+       "MSR_SYSCALL_MASK", "MSR_LSTAR", "MSR_CSTAR", "MSR_KERNEL_GS_BASE",
+#endif
+       "MSR_EFER",     // wtf? MSR_K6_STAR,
+};
+
 #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
 
 #ifdef __x86_64__
@@ -633,6 +642,7 @@ struct litevm *vmx_open(void)
        struct litevm *litevm = kzmalloc(sizeof(struct litevm), KMALLOC_WAIT);
        int i;
 
+       printk("vmx_open: litevm is %p\n", litevm);
        if (!litevm) {
                printk("NO LITEVM! MAKES NO SENSE!\n");
                error("litevm alloc failed");
@@ -640,13 +650,15 @@ struct litevm *vmx_open(void)
                return 0;
        }
 
-       spinlock_init_irqsave(&litevm->lock);
+       SPLI_irqsave(&litevm->lock);
        LIST_INIT(&litevm->link);
        for (i = 0; i < LITEVM_MAX_VCPUS; ++i) {
                struct litevm_vcpu *vcpu = &litevm->vcpus[i];
+               printk("init vcpu %p\n", vcpu);
 
                QLOCK_init(&vcpu->mutex);
                vcpu->mmu.root_hpa = INVALID_PAGE;
+               vcpu->litevm = litevm;
                LIST_INIT(&vcpu->link);
        }
        printk("vmx_open: busy %d\n", litevm->busy);
@@ -987,7 +999,7 @@ static void __set_cr0(struct litevm_vcpu *vcpu, unsigned long cr0)
                enter_pmode(vcpu);
 
        if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
-               error("real mode is not supported yet\n"); //enter_rmode(vcpu);
+               enter_rmode(vcpu);
 
 #ifdef __x86_64__
        if (vcpu->shadow_efer & EFER_LME) {
@@ -1014,7 +1026,7 @@ static int pdptrs_have_reserved_bits_set(struct litevm_vcpu *vcpu,
        uint64_t *pdpt;
        struct litevm_memory_slot *memslot;
 
-       spin_lock_irqsave(&vcpu->litevm->lock);
+       SPLL(&vcpu->litevm->lock);
        memslot = gfn_to_memslot(vcpu->litevm, pdpt_gfn);
        /* FIXME: !memslot - emulate? 0xff? */
        pdpt = page2kva(gfn_to_page(memslot, pdpt_gfn));
@@ -1025,7 +1037,7 @@ static int pdptrs_have_reserved_bits_set(struct litevm_vcpu *vcpu,
                        break;
        }
 
-       spin_unlock(&vcpu->litevm->lock);
+       SPLU(&vcpu->litevm->lock);
 
        print_func_exit();
        return i != 4;
@@ -1149,9 +1161,9 @@ static void set_cr4(struct litevm_vcpu *vcpu, unsigned long cr4)
                return;
        }
        __set_cr4(vcpu, cr4);
-       spin_lock_irqsave(&vcpu->litevm->lock);
+       SPLL(&vcpu->litevm->lock);
        litevm_mmu_reset_context(vcpu);
-       spin_unlock(&vcpu->litevm->lock);
+       SPLU(&vcpu->litevm->lock);
        print_func_exit();
 }
 
@@ -1181,9 +1193,9 @@ static void set_cr3(struct litevm_vcpu *vcpu, unsigned long cr3)
        }
 
        vcpu->cr3 = cr3;
-       spin_lock_irqsave(&vcpu->litevm->lock);
+       SPLL(&vcpu->litevm->lock);
        vcpu->mmu.new_cr3(vcpu);
-       spin_unlock(&vcpu->litevm->lock);
+       SPLU(&vcpu->litevm->lock);
        print_func_exit();
 }
 
@@ -1261,6 +1273,7 @@ static void vmcs_write32_fixedbits(uint32_t msr, uint32_t vmcs_field,
 static int litevm_vcpu_setup(struct litevm_vcpu *vcpu)
 {
        print_func_entry();
+
 /* no op on x86_64 */
 #define asmlinkage
        extern asmlinkage void litevm_vmx_return(void);
@@ -1273,10 +1286,6 @@ static int litevm_vcpu_setup(struct litevm_vcpu *vcpu)
        uint64_t tsc;
        int nr_good_msrs;
 
-       if (!init_rmode_tss(vcpu->litevm)) {
-               error("vcpu_setup: init_rmode_tss failed");
-       }
-
        memset(vcpu->regs, 0, sizeof(vcpu->regs));
        vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vcpu->cr8 = 0;
@@ -1516,9 +1525,10 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
                error("%d is out of range; LITEVM_MAX_VCPUS is %d", n,
                          LITEVM_MAX_VCPUS);
        }
-
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
        vcpu = &litevm->vcpus[n];
 
+       printk("vmx_create_vcpu: @%d, %p\n", n, vcpu);
        QLOCK(&vcpu->mutex);
 
        if (vcpu->vmcs) {
@@ -1526,7 +1536,7 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
                printk("VM already exists\n");
                error("VM already exists");
        }
-
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
        /* I'm a bad person */
        //ALIGN(vcpu->fx_buf, FX_IMAGE_ALIGN);
        uint64_t a = (uint64_t) vcpu->fx_buf;
@@ -1539,17 +1549,17 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
 
        vcpu->cpu = -1; /* First load will set up TR */
        vcpu->litevm = litevm;
-
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
        if (waserror()){
-               printk("ERR 1 in %s\n", __func__);
+               printk("ERR 1 in %s, %s\n", __func__, current_errstr());
                QUNLOCK(&vcpu->mutex);
                litevm_free_vcpu(vcpu);
                nexterror();
        }
-
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
        vmcs = alloc_vmcs();
        vmcs_clear(vmcs);
-
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
        printk("after vmcs_clear\n");
        vcpu->vmcs = vmcs;
        printk("vcpu %p set vmcs to %p\n", vcpu, vmcs);
@@ -1561,13 +1571,34 @@ int vmx_create_vcpu(struct litevm *litevm, int n)
        printk("PAST vcpu_load\n");
        if (waserror()) {
                /* we really need to fix waserror() */
-               printk("vcpu_setup failed\n");
+               printk("vcpu_setup failed: %s\n", current_errstr());
                QUNLOCK(&vcpu->mutex);
                nexterror();
        }
 
-       /* ignore the real mode stuff. */
-       r = 1; //litevm_vcpu_setup(vcpu);
+       /* need memory for the rmode_tss. I have no idea how this happened
+        * originally in kvm.
+        */
+       /* this sucks. */
+       QUNLOCK(&vcpu->mutex);
+       void *v;
+       struct litevm_memory_region vmr;
+       vmr.slot = 0;
+       vmr.flags = 0;
+       vmr.guest_phys_addr = /* guess. */ 0x1000000;
+       vmr.memory_size = 0x10000;
+       if (vm_set_memory_region(litevm, &vmr))
+               printk("vm_set_memory_region failed");
+
+       printk("set memory region done\n");
+
+       if (!init_rmode_tss(litevm)) {
+               error("vcpu_setup: init_rmode_tss failed");
+       }
+
+
+       QLOCK(&vcpu->mutex);
+       r = litevm_vcpu_setup(vcpu);
 
        vcpu_put(vcpu);
 
@@ -1615,9 +1646,10 @@ int vm_set_memory_region(struct litevm *litevm,
 
        if (!mem)
                error("NULL mem in %s", __func__);
-
+       /* I don't care right now. *
        if (litevm->busy)
                error("litevm->busy is set! 0x%x\n", litevm->busy);
+       */
        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
@@ -1645,16 +1677,20 @@ int vm_set_memory_region(struct litevm *litevm,
         */
 raced:
        printk("raced: pass %d\n", pass);
-       spin_lock_irqsave(&litevm->lock);
+       printk("LOCK %p, locked %d\n", &litevm->lock, spin_locked(&litevm->lock));
+       monitor(NULL);
+       SPLL(&litevm->lock);
        printk("locked\n");
 
        if (waserror()) {
-               spin_unlock(&litevm->lock);
+               printk("error in %s, %s\n", __func__, current_errstr());
+               SPLU(&litevm->lock);
                nexterror();
        }
 
        memory_config_version = litevm->memory_config_version;
        new = old = *memslot;
+       printk("memory_config_version %d\n", memory_config_version);
 
        new.base_gfn = base_gfn;
        new.npages = npages;
@@ -1681,7 +1717,7 @@ raced:
         * Do memory allocations outside lock.  memory_config_version will
         * detect any races.
         */
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        printk("unlocked\n");
        poperror();
 
@@ -1730,10 +1766,10 @@ raced:
                }
        }
 
-       spin_lock_irqsave(&litevm->lock);
+       SPLL(&litevm->lock);
        printk("locked\n");
        if (memory_config_version != litevm->memory_config_version) {
-               spin_unlock(&litevm->lock);
+               SPLU(&litevm->lock);
                printk("unlocked, try again\n");
                litevm_free_physmem_slot(&new, &old);
                goto raced;
@@ -1751,7 +1787,7 @@ raced:
        *memslot = new;
        ++litevm->memory_config_version;
 
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        printk("unlocked\n");
        for (i = 0; i < LITEVM_MAX_VCPUS; ++i) {
                struct litevm_vcpu *vcpu;
@@ -1770,7 +1806,7 @@ raced:
        return 0;
 
 out_unlock:
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        printk("out_unlock\n");
 out_free:
        printk("out_free\n");
@@ -1793,14 +1829,14 @@ static int litevm_dev_ioctl_get_dirty_log(struct litevm *litevm,
        int n;
        unsigned long any = 0;
 
-       spin_lock_irqsave(&litevm->lock);
+       SPLL(&litevm->lock);
 
        /*
         * Prevent changes to guest memory configuration even while the lock
         * is not taken.
         */
        ++litevm->busy;
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        r = -EINVAL;
        if (log->slot >= LITEVM_MEMORY_SLOTS)
                goto out;
@@ -1820,9 +1856,9 @@ static int litevm_dev_ioctl_get_dirty_log(struct litevm *litevm,
                goto out;
 
        if (any) {
-               spin_lock_irqsave(&litevm->lock);
+               SPLL(&litevm->lock);
                litevm_mmu_slot_remove_write_access(litevm, log->slot);
-               spin_unlock(&litevm->lock);
+               SPLU(&litevm->lock);
                memset(memslot->dirty_bitmap, 0, n);
                for (i = 0; i < LITEVM_MAX_VCPUS; ++i) {
                        struct litevm_vcpu *vcpu = vcpu_load(litevm, i);
@@ -1837,9 +1873,9 @@ static int litevm_dev_ioctl_get_dirty_log(struct litevm *litevm,
        r = 0;
 
 out:
-       spin_lock_irqsave(&litevm->lock);
+       SPLL(&litevm->lock);
        --litevm->busy;
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        return r;
 }
 #endif
@@ -2275,15 +2311,15 @@ static int handle_exception(struct litevm_vcpu *vcpu,
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
-               spin_lock_irqsave(&vcpu->litevm->lock);
+               SPLL(&vcpu->litevm->lock);
                if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
-                       spin_unlock(&vcpu->litevm->lock);
+                       SPLU(&vcpu->litevm->lock);
                        print_func_exit();
                        return 1;
                }
 
                er = emulate_instruction(vcpu, litevm_run, cr2, error_code);
-               spin_unlock(&vcpu->litevm->lock);
+               SPLU(&vcpu->litevm->lock);
 
                switch (er) {
                        case EMULATE_DONE:
@@ -2417,9 +2453,9 @@ static int handle_invlpg(struct litevm_vcpu *vcpu,
        print_func_entry();
        uint64_t address = vmcs_read64(EXIT_QUALIFICATION);
        int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-       spin_lock_irqsave(&vcpu->litevm->lock);
+       SPLL(&vcpu->litevm->lock);
        vcpu->mmu.inval_page(vcpu, address);
-       spin_unlock(&vcpu->litevm->lock);
+       SPLU(&vcpu->litevm->lock);
        vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
        print_func_exit();
        return 1;
@@ -2921,8 +2957,15 @@ static void load_msrs(struct vmx_msr_entry *e, int n)
        print_func_entry();
        int i;
 
-       for (i = 0; i < n; ++i)
+       if (! e) {
+               printk("LOAD MSR WITH NULL POINTER?");
+               error("LOAD MSR WITH NULL POINTER?");
+       }
+       for (i = 0; i < n; ++i) {
+               printk("Load MSR (%lx), with %lx\n", e[i].index, e[i].data);
                write_msr(e[i].index, e[i].data);
+               printk("Done\n");
+       }
        print_func_exit();
 }
 
@@ -3426,13 +3469,13 @@ static int litevm_dev_ioctl_translate(struct litevm *litevm,
                print_func_exit();
                return -ENOENT;
        }
-       spin_lock_irqsave(&litevm->lock);
+       SPLL(&litevm->lock);
        gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
-       spin_unlock(&litevm->lock);
+       SPLU(&litevm->lock);
        vcpu_put(vcpu);
 
        print_func_exit();
index a85082e..e131b86 100644 (file)
@@ -762,11 +762,15 @@ static int alloc_mmu_pages(struct litevm_vcpu *vcpu)
                struct page *page;
                struct litevm_mmu_page *page_header = &vcpu->page_header_buf[i];
 
-               if ((page = kpage_alloc_addr()) == NULL)
+               if (kpage_alloc(&page) != ESUCCESS)
                        goto error_1;
+               printk("page_header %p, page %p\n", page_header, page);
                page->pg_private = page_header;
-               page_header->page_hpa = (hpa_t) page2ppn(page) << PAGE_SHIFT;
+               printk("page2ppn(page) is %lx\n", page2ppn(page));
+               page_header->page_hpa = (hpa_t) page2pa(page);
+               printk("page_hpa is %lx\n", page_header->page_hpa);
                memset(KADDR(page_header->page_hpa), 0, PAGE_SIZE);
+               printk("INSERT\n");
                LIST_INSERT_HEAD(&vcpu->link, page_header, link);
        }
        print_func_exit();
index 3c452aa..0554b45 100644 (file)
@@ -37,25 +37,18 @@ enum {
        Qimage,
 };
 
-/* This paddr/kaddr is a bit dangerous.  it'll work so long as we don't need all
- * 64 bits for a physical address (48 is the current norm on x86_64).
- * We're probably going to move to a model where we put the VM index or something
- * into the qid, but this works for now.
+/* The QID is the TYPE and the index into the vms array.
+ * We reserve the right to make it an id later.
  */
-#define ADDR_SHIFT 5
-#define QID2VM(q) ((struct vm*)KADDR(((q).path >> ADDR_SHIFT)))
-#define TYPE(q) ((q).path & ((1 << ADDR_SHIFT) - 1))
-#define QID(ptr, type) ((PADDR(ptr) << ADDR_SHIFT) | type)
-
+#define ID_SHIFT 5
 /* vm's have an image.
  * Note that the image can be read even as it is running. */
 struct vm {
-       struct vm *next;
        struct kref kref;
        /* should this be an array of pages? Hmm. */
        void *image;
        unsigned long imagesize;
-       int id;
+       int id; // not used yet. 
        struct litevm *archvm;
 };
 
@@ -68,6 +61,23 @@ static int vmok = 0;
 static spinlock_t vmidlock[1];
 static struct kref vmid[1] = { {(void *)1, fake_release} };
 
+static inline struct vm *
+QID2VM(struct qid q)
+{
+       return &vms[((q).path >> ID_SHIFT)];
+}
+
+static inline int 
+TYPE(struct qid q)
+{
+       return ((q).path & ((1 << ID_SHIFT) - 1));
+}
+
+static inline int QID(int index, int type)
+{
+       return ((index << ID_SHIFT) | type);
+}
+
 /* we'll need this somewhere more generic. */
 static void readn(struct chan *c, void *vp, long n)
 {
@@ -90,6 +100,7 @@ static void readn(struct chan *c, void *vp, long n)
        print_func_exit();
 }
 
+/* not called yet.  -- we have to unlink the vm */
 static void vm_release(struct kref *kref)
 {
        print_func_entry();
@@ -173,7 +184,8 @@ static int vmgen(struct chan *c, char *entry_name,
                        vm_i = &vms[s];
                        snprintf(get_cur_genbuf(), GENBUF_SZ, "vm%d", vm_i->id);
                        spin_unlock(&vmlock);
-                       mkqid(&q, QID(vm_i, Qvmdir), 0, QTDIR);
+                       printk("clone vm_i is %p\n", vm_i);
+                       mkqid(&q, QID(s, Qvmdir), 0, QTDIR);
                        devdir(c, q, get_cur_genbuf(), 0, eve, 0555, dp);
                        print_func_exit();
                        return 1;
@@ -182,12 +194,12 @@ static int vmgen(struct chan *c, char *entry_name,
                        s += Qctl;      /* first time through, start on Qctl */
                        switch (s) {
                                case Qctl:
-                                       mkqid(&q, QID(QID2VM(c->qid), Qctl), 0, QTFILE);
+                                       mkqid(&q, QID(s-Qctl, Qctl), 0, QTFILE);
                                        devdir(c, q, "ctl", 0, eve, 0666, dp);
                                        print_func_exit();
                                        return 1;
                                case Qimage:
-                                       mkqid(&q, QID(QID2VM(c->qid), Qimage), 0, QTFILE);
+                                       mkqid(&q, QID(s-Qctl, Qimage), 0, QTFILE);
                                        devdir(c, q, "image", 0, eve, 0666, dp);
                                        print_func_exit();
                                        return 1;
@@ -293,7 +305,7 @@ static struct chan *vmopen(struct chan *c, int omode)
                        spin_unlock(&vmlock);
                        kref_init(&v->kref, vm_release, 1);
                        v->id = newvmid();
-                       mkqid(&c->qid, QID(v, Qctl), 0, QTFILE);
+                       mkqid(&c->qid, QID(nvm, Qctl), 0, QTFILE);
                        c->aux = v;
                        printd("New VM id %d\n", v->id);
                        v->archvm = vmx_open();
@@ -305,13 +317,16 @@ static struct chan *vmopen(struct chan *c, int omode)
                                printk("vm_create failed");
                                error("vm_create failed");
                        }
+                       printk("Qclone open: id %d, v is %p, v->archvm is %p\n", 
+                                       nvm-1,
+                                       v, v->archvm);
                        break;
                case Qstat:
                        break;
                case Qctl:
                case Qimage:
                        c->aux = QID2VM(c->qid);
-                       printk("open qctl: aux is %p\n", c->aux);
+                       printk("open qctl: aux (vm) is %p\n", c->aux);
                        break;
        }
        c->mode = openmode(omode);
@@ -416,6 +431,8 @@ static long vmwrite(struct chan *c, void *ubuf, long n, int64_t unused)
                        error(Eperm);
                case Qctl:
                        vm = c->aux;
+                       litevm = vm->archvm;
+                       printk("qctl: vm is %p, litevm is %p\n", vm, litevm);
                        cb = parsecmd(ubuf, n);
                        if (waserror()) {
                                kfree(cb);
@@ -425,7 +442,6 @@ static long vmwrite(struct chan *c, void *ubuf, long n, int64_t unused)
                                int ret;
                                if (cb->nf != 4)
                                        error("usage: run vcpu emulated mmio_completed");
-                               litevm = vm->archvm;
                                struct litevm_run vmr;
                                vmr.vcpu = strtoul(cb->f[1], NULL, 0);
                                vmr.emulated = strtoul(cb->f[2], NULL, 0);
@@ -436,11 +452,10 @@ static long vmwrite(struct chan *c, void *ubuf, long n, int64_t unused)
                                return ret;
                        } else if (!strcmp(cb->f[0], "stop")) {
                                error("can't stop a vm yet");
-                       } else if (!strcmp(cb->f[0], "fillmem")) {
+                       } else if (!strcmp(cb->f[0], "mapmem")) {
                                struct chan *file;
                                void *v;
                                vm = c->aux;
-                               litevm = vm->archvm;
                                uint64_t filesize;
                                struct litevm_memory_region vmr;
                                int got;
@@ -482,13 +497,12 @@ static long vmwrite(struct chan *c, void *ubuf, long n, int64_t unused)
                        } else if (!strcmp(cb->f[0], "region")) {
                                void *v;
                                struct litevm_memory_region vmr;
-                               litevm = vm->archvm;
                                if (cb->nf != 5)
                                        error("usage: mapmem slot flags addr size");
-                               vmr.slot = strtoul(cb->f[2], NULL, 0);
-                               vmr.flags = strtoul(cb->f[3], NULL, 0);
-                               vmr.guest_phys_addr = strtoul(cb->f[4], NULL, 0);
-                               vmr.memory_size = strtoul(cb->f[5], NULL, 0);
+                               vmr.slot = strtoul(cb->f[1], NULL, 0);
+                               vmr.flags = strtoul(cb->f[2], NULL, 0);
+                               vmr.guest_phys_addr = strtoul(cb->f[3], NULL, 0);
+                               vmr.memory_size = strtoul(cb->f[4], NULL, 0);
                                if (vm_set_memory_region(litevm, &vmr))
                                        error("vm_set_memory_region failed");
                        } else {
index 26a7670..8c1b8ac 100644 (file)
@@ -2,8 +2,8 @@
 cat '#V/clone'
 ls '#V'
 cp /bin/hlt '#r'
-echo fillmem '#r/hlt' 0 0 0 0x2001 > '#V/vm0/ctl'
-
+# our kernel.
+echo mapmem '#r/hlt' 1 0 0 0x2001 > '#V/vm0/ctl'
 echo 'run 0 0 0' > '#V/vm0/ctl'