Stub out kernel support for VM contexts (XCC)
authorBarret Rhoden <brho@cs.berkeley.edu>
Wed, 20 Jan 2016 22:22:53 +0000 (17:22 -0500)
committerBarret Rhoden <brho@cs.berkeley.edu>
Tue, 2 Feb 2016 22:43:52 +0000 (17:43 -0500)
The places that need to be implemented are marked with TODO: (VMCTX).  This just
provides the framework for the actual kernel support and a first cut at
what a VM context looks like.

Reinstall your kernel headers.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
12 files changed:
kern/arch/riscv/process.c
kern/arch/riscv/ros/trapframe.h
kern/arch/riscv/trap.c
kern/arch/riscv/trap.h
kern/arch/x86/process64.c
kern/arch/x86/ros/trapframe64.h
kern/arch/x86/trap.c
kern/arch/x86/trap64.c
kern/arch/x86/trap64.h
kern/include/ros/trapframe.h
kern/include/trap.h
kern/src/trap.c

index bdedee3..7b72d2e 100644 (file)
@@ -8,7 +8,6 @@
 #include <assert.h>
 #include <stdio.h>
 
-/* TODO: handle user and kernel contexts */
 void proc_pop_ctx(struct user_context *ctx)
 {
        struct hw_trapframe *tf = &ctx->tf.hw_tf;
@@ -46,7 +45,7 @@ void proc_init_ctx(struct user_context *ctx, uint32_t vcoreid, uintptr_t entryp,
        tf->gpr[GPR_A0] = vcoreid;
 }
 
-/* TODO: handle both HW and SW contexts */
+/* TODO: handle SW and VM contexts */
 void proc_secure_ctx(struct user_context *ctx)
 {
        struct hw_trapframe *tf = &ctx->tf.hw_tf;
index 1b5b74f..b632529 100644 (file)
@@ -20,6 +20,10 @@ struct sw_trapframe {
        /* TODO */
 };
 
+struct vm_trapframe {
+       /* TODO */
+};
+
 #define GPR_RA 1
 #define GPR_SP 14
 #define GPR_A0 18
index 6141ae2..b41cd3e 100644 (file)
@@ -112,14 +112,23 @@ format_trapframe(struct hw_trapframe *hw_tf, char* buf, int bufsz)
        return len;
 }
 
-void
-print_trapframe(struct hw_trapframe *hw_tf)
+void print_trapframe(struct hw_trapframe *hw_tf)
 {
        char buf[1024];
        int len = format_trapframe(hw_tf, buf, sizeof(buf));
        cputbuf(buf,len);
 }
 
+void print_swtrapframe(struct sw_trapframe *sw_tf)
+{
+       #warning "fix me"
+}
+
+void print_vmtrapframe(struct vm_trapframe *vm_tf)
+{
+       #warning "fix me"
+}
+
 static void exit_halt_loop(struct hw_trapframe *hw_tf)
 {
        extern char after_cpu_halt;
index 28c313a..fbe8aff 100644 (file)
@@ -56,6 +56,18 @@ static inline uintptr_t get_swtf_fp(struct sw_trapframe *sw_tf)
        //return sw_tf->tf_rbp;
 }
 
+static inline uintptr_t get_vmtf_pc(struct vm_trapframe *vm_tf)
+{
+       #warning "fix me"
+       return 0;
+}
+
+static inline uintptr_t get_vmtf_fp(struct vm_trapframe *vm_tf)
+{
+       #warning "fix me"
+       return 0;
+}
+
 static inline void __attribute__((always_inline))
 set_stack_pointer(uintptr_t sp)
 {
index 0397091..362f5b1 100644 (file)
@@ -8,74 +8,96 @@
 #include <assert.h>
 #include <stdio.h>
 
-void proc_pop_ctx(struct user_context *ctx)
+static void __attribute__((noreturn)) proc_pop_hwtf(struct hw_trapframe *tf)
 {
-       disable_irq();
        /* for both HW and SW, note we pass an offset into the TF, beyond the fs and
         * gs bases */
-       if (ctx->type == ROS_HW_CTX) {
-               struct hw_trapframe *tf = &ctx->tf.hw_tf;
+       if (x86_hwtf_is_partial(tf)) {
+               swap_gs();
+       } else {
+               write_msr(MSR_GS_BASE, (uint64_t)tf->tf_gsbase);
+               write_msr(MSR_FS_BASE, (uint64_t)tf->tf_fsbase);
+       }
+       asm volatile ("movq %0, %%rsp;          "
+                     "popq %%rax;              "
+                     "popq %%rbx;              "
+                     "popq %%rcx;              "
+                     "popq %%rdx;              "
+                     "popq %%rbp;              "
+                     "popq %%rsi;              "
+                     "popq %%rdi;              "
+                     "popq %%r8;               "
+                     "popq %%r9;               "
+                     "popq %%r10;              "
+                     "popq %%r11;              "
+                     "popq %%r12;              "
+                     "popq %%r13;              "
+                     "popq %%r14;              "
+                     "popq %%r15;              "
+                     "addq $0x10, %%rsp;       "
+                     "iretq                    "
+                     : : "g" (&tf->tf_rax) : "memory");
+       panic("iretq failed");
+}
 
-               if (x86_hwtf_is_partial(tf)) {
-                       swap_gs();
-               } else {
-                       write_msr(MSR_GS_BASE, (uint64_t)tf->tf_gsbase);
-                       write_msr(MSR_FS_BASE, (uint64_t)tf->tf_fsbase);
-               }
-               asm volatile ("movq %0, %%rsp;          "
-                             "popq %%rax;              "
-                             "popq %%rbx;              "
-                             "popq %%rcx;              "
-                             "popq %%rdx;              "
-                             "popq %%rbp;              "
-                             "popq %%rsi;              "
-                             "popq %%rdi;              "
-                             "popq %%r8;               "
-                             "popq %%r9;               "
-                             "popq %%r10;              "
-                             "popq %%r11;              "
-                             "popq %%r12;              "
-                             "popq %%r13;              "
-                             "popq %%r14;              "
-                             "popq %%r15;              "
-                             "addq $0x10, %%rsp;       "
-                             "iretq                    "
-                             : : "g" (&tf->tf_rax) : "memory");
-               panic("iretq failed");
+static void __attribute__((noreturn)) proc_pop_swtf(struct sw_trapframe *tf)
+{
+       if (x86_swtf_is_partial(tf)) {
+               swap_gs();
        } else {
-               struct sw_trapframe *tf = &ctx->tf.sw_tf;
+               write_msr(MSR_GS_BASE, (uint64_t)tf->tf_gsbase);
+               write_msr(MSR_FS_BASE, (uint64_t)tf->tf_fsbase);
+       }
+       /* We need to 0 out any registers that aren't part of the sw_tf and that we
+        * won't use/clobber on the out-path.  While these aren't part of the sw_tf,
+        * we also don't want to leak any kernel register content. */
+       asm volatile ("movq %0, %%rsp;          "
+                     "movq $0, %%rax;          "
+                     "movq $0, %%rdx;          "
+                     "movq $0, %%rsi;          "
+                     "movq $0, %%rdi;          "
+                     "movq $0, %%r8;           "
+                     "movq $0, %%r9;           "
+                     "movq $0, %%r10;          "
+                     "popq %%rbx;              "
+                     "popq %%rbp;              "
+                     "popq %%r12;              "
+                     "popq %%r13;              "
+                     "popq %%r14;              "
+                     "popq %%r15;              "
+                     "movq %1, %%r11;          "
+                     "popq %%rcx;              "
+                     "popq %%rsp;              "
+                     "rex.w sysret             "
+                     : : "g"(&tf->tf_rbx), "i"(FL_IF) : "memory");
+       panic("sysret failed");
+}
+
+static void __attribute__((noreturn)) proc_pop_vmtf(struct vm_trapframe *tf)
+{
+       /* This function probably will be able to fail internally.  If that happens,
+        * we'll just build a dummy SW TF and pop that instead. */
+       /* TODO: (VMCTX) */
+       panic("Not implemented");
+}
 
-               if (x86_swtf_is_partial(tf)) {
-                       swap_gs();
-               } else {
-                       write_msr(MSR_GS_BASE, (uint64_t)tf->tf_gsbase);
-                       write_msr(MSR_FS_BASE, (uint64_t)tf->tf_fsbase);
-               }
-               /* We need to 0 out any registers that aren't part of the sw_tf and that
-                * we won't use/clobber on the out-path.  While these aren't part of the
-                * sw_tf, we also don't want to leak any kernel register content. */
-               asm volatile ("movq %0, %%rsp;          "
-                             "movq $0, %%rax;          "
-                                         "movq $0, %%rdx;          "
-                                         "movq $0, %%rsi;          "
-                                         "movq $0, %%rdi;          "
-                                         "movq $0, %%r8;           "
-                                         "movq $0, %%r9;           "
-                                         "movq $0, %%r10;          "
-                             "popq %%rbx;              "
-                             "popq %%rbp;              "
-                             "popq %%r12;              "
-                             "popq %%r13;              "
-                             "popq %%r14;              "
-                             "popq %%r15;              "
-                                         "movq %1, %%r11;          "
-                             "popq %%rcx;              "
-                             "popq %%rsp;              "
-                             "rex.w sysret             "
-                             : : "g"(&tf->tf_rbx), "i"(FL_IF) : "memory");
-               panic("sysret failed");
+void proc_pop_ctx(struct user_context *ctx)
+{
+       disable_irq();
+       switch (ctx->type) {
+       case ROS_HW_CTX:
+               proc_pop_hwtf(&ctx->tf.hw_tf);
+               break;
+       case ROS_SW_CTX:
+               proc_pop_swtf(&ctx->tf.sw_tf);
+               break;
+       case ROS_VM_CTX:
+               proc_pop_vmtf(&ctx->tf.vm_tf);
+               break;
+       default:
+               /* We should have caught this when securing the ctx */
+               panic("Unknown context type %d!", ctx->type);
        }
-       panic("Unknown context type!\n");
 }
 
 /* Helper: if *addr isn't a canonical user address, poison it.  Use this when
@@ -114,29 +136,53 @@ void proc_init_ctx(struct user_context *ctx, uint32_t vcoreid, uintptr_t entryp,
        proc_secure_ctx(ctx);
 }
 
+static void proc_secure_hwtf(struct hw_trapframe *tf)
+{
+       enforce_user_canon(&tf->tf_gsbase);
+       enforce_user_canon(&tf->tf_fsbase);
+       /* GD_UD is the user data segment selector in the GDT, and
+        * GD_UT is the user text segment selector (see inc/memlayout.h).
+        * The low 2 bits of each segment register contains the
+        * Requestor Privilege Level (RPL); 3 means user mode. */
+       tf->tf_ss = GD_UD | 3;
+       tf->tf_cs = GD_UT | 3;
+       tf->tf_rflags |= FL_IF;
+       x86_hwtf_clear_partial(tf);
+}
+
+static void proc_secure_swtf(struct sw_trapframe *tf)
+{
+       enforce_user_canon(&tf->tf_gsbase);
+       enforce_user_canon(&tf->tf_fsbase);
+       enforce_user_canon(&tf->tf_rip);
+       x86_swtf_clear_partial(tf);
+}
+
+static void proc_secure_vmtf(struct vm_trapframe *tf)
+{
+       /* The user can say whatever it wants for the bulk of the TF, but the only
+        * thing it can't fake is whether or not it is a partial context, which
+        * other parts of the kernel rely on. */
+       x86_vmtf_clear_partial(tf);
+}
+
 void proc_secure_ctx(struct user_context *ctx)
 {
-       if (ctx->type == ROS_SW_CTX) {
-               struct sw_trapframe *tf = &ctx->tf.sw_tf;
-               enforce_user_canon(&tf->tf_gsbase);
-               enforce_user_canon(&tf->tf_fsbase);
-               enforce_user_canon(&tf->tf_rip);
-               x86_swtf_clear_partial(tf);
-       } else {
-               /* If we aren't SW, we're assuming (and forcing) a HW ctx.  If this is
-                * somehow fucked up, userspace should die rather quickly. */
-               struct hw_trapframe *tf = &ctx->tf.hw_tf;
+       switch (ctx->type) {
+       case ROS_HW_CTX:
+               proc_secure_hwtf(&ctx->tf.hw_tf);
+               break;
+       case ROS_SW_CTX:
+               proc_secure_swtf(&ctx->tf.sw_tf);
+               break;
+       case ROS_VM_CTX:
+               proc_secure_vmtf(&ctx->tf.vm_tf);
+               break;
+       default:
+               /* If we aren't another ctx type, we're assuming (and forcing) a HW ctx.
+                * If this is somehow fucked up, userspace should die rather quickly. */
                ctx->type = ROS_HW_CTX;
-               enforce_user_canon(&tf->tf_gsbase);
-               enforce_user_canon(&tf->tf_fsbase);
-               /* GD_UD is the user data segment selector in the GDT, and
-                * GD_UT is the user text segment selector (see inc/memlayout.h).
-                * The low 2 bits of each segment register contains the
-                * Requestor Privilege Level (RPL); 3 means user mode. */
-               tf->tf_ss = GD_UD | 3;
-               tf->tf_cs = GD_UT | 3;
-               tf->tf_rflags |= FL_IF;
-               x86_hwtf_clear_partial(tf);
+               proc_secure_hwtf(&ctx->tf.hw_tf);
        }
 }
 
index af2de4c..7848e29 100644 (file)
@@ -54,3 +54,51 @@ struct sw_trapframe {
        uint16_t tf_fpucw;
        uint16_t tf_padding0;           /* used for partial contexts */
 };
+
+/* The context is both what we want to run and its current state.  For VMs, that
+ * includes status bits from the VMCS for reflected vmexits/hypercalls.  This is
+ * not particularly different than how hardware contexts contain info on
+ * reflected traps.
+ *
+ * The VM context also consists of a mountain of state in the VMCS, referenced
+ * only in here by guest pcoreid.  Those bits are set once by Akaros to sensible
+ * defaults and then are changed during execution of the VM.  The parts of that
+ * state that are exposed to the user-VMM are the contents of the trapframe. */
+
+#define VMCTX_FL_PARTIAL               (1 << 0)
+#define VMCTX_FL_HAS_FAULT             (1 << 1)
+
+struct vm_trapframe {
+       /* Actual processor state */
+       uint64_t tf_rax;
+       uint64_t tf_rbx;
+       uint64_t tf_rcx;
+       uint64_t tf_rdx;
+       uint64_t tf_rbp;
+       uint64_t tf_rsi;
+       uint64_t tf_rdi;
+       uint64_t tf_r8;
+       uint64_t tf_r9;
+       uint64_t tf_r10;
+       uint64_t tf_r11;
+       uint64_t tf_r12;
+       uint64_t tf_r13;
+       uint64_t tf_r14;
+       uint64_t tf_r15;
+       uint64_t tf_rip;
+       uint64_t tf_rflags;
+       uint64_t tf_rsp;
+       uint64_t tf_cr2;
+       uint64_t tf_cr3;
+       /* Admin bits */
+       uint32_t tf_guest_pcoreid;
+       uint32_t tf_flags;
+       uint32_t tf_trap_inject;
+       uint32_t tf_padding0;
+       uint32_t tf_exit_reason;
+       uint32_t tf_exit_qual;
+       uint32_t tf_intrinfo1;
+       uint32_t tf_intrinfo2;
+       uint64_t tf_guest_va;
+       uint64_t tf_guest_pa;
+};
index 01abe92..42af600 100644 (file)
@@ -451,6 +451,17 @@ static void set_current_ctx_sw(struct per_cpu_info *pcpui,
        pcpui->cur_ctx = &pcpui->actual_ctx;
 }
 
+/* TODO: (VMCTX) need to call this after we enter the kernel from a vm ctx. */
+static void set_current_ctx_vm(struct per_cpu_info *pcpui,
+                               struct vm_trapframe *vm_tf)
+{
+       assert(!irq_is_enabled());
+       assert(!pcpui->cur_ctx);
+       pcpui->actual_ctx.type = ROS_VM_CTX;
+       pcpui->actual_ctx.tf.vm_tf = *vm_tf;
+       pcpui->cur_ctx = &pcpui->actual_ctx;
+}
+
 void trap(struct hw_trapframe *hw_tf)
 {
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
index 4426d6c..0ed7528 100644 (file)
@@ -102,6 +102,48 @@ void print_swtrapframe(struct sw_trapframe *sw_tf)
        pcpui->__lock_checking_enabled++;
 }
 
+void print_vmtrapframe(struct vm_trapframe *vm_tf)
+{
+       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+
+       pcpui->__lock_checking_enabled--;
+       spin_lock_irqsave(&ptf_lock);
+       printk("VM Trapframe %sat %p on core %d\n",
+              x86_vmtf_is_partial(vm_tf) ? "(partial) " : "",
+              vm_tf, core_id());
+       printk("  rax  0x%016lx\n",           vm_tf->tf_rax);
+       printk("  rbx  0x%016lx\n",           vm_tf->tf_rbx);
+       printk("  rcx  0x%016lx\n",           vm_tf->tf_rcx);
+       printk("  rdx  0x%016lx\n",           vm_tf->tf_rdx);
+       printk("  rbp  0x%016lx\n",           vm_tf->tf_rbp);
+       printk("  rsi  0x%016lx\n",           vm_tf->tf_rsi);
+       printk("  rdi  0x%016lx\n",           vm_tf->tf_rdi);
+       printk("  r8   0x%016lx\n",           vm_tf->tf_r8);
+       printk("  r9   0x%016lx\n",           vm_tf->tf_r9);
+       printk("  r10  0x%016lx\n",           vm_tf->tf_r10);
+       printk("  r11  0x%016lx\n",           vm_tf->tf_r11);
+       printk("  r12  0x%016lx\n",           vm_tf->tf_r12);
+       printk("  r13  0x%016lx\n",           vm_tf->tf_r13);
+       printk("  r14  0x%016lx\n",           vm_tf->tf_r14);
+       printk("  r15  0x%016lx\n",           vm_tf->tf_r15);
+       printk("  rip  0x%016lx\n",           vm_tf->tf_rip);
+       printk("  rflg 0x%016lx\n",           vm_tf->tf_rflags);
+       printk("  rsp  0x%016lx\n",           vm_tf->tf_rsp);
+       printk("  cr2  0x%016lx\n",           vm_tf->tf_cr2);
+       printk("  cr3  0x%016lx\n",           vm_tf->tf_cr3);
+       printk("Gpcore 0x%08x\n",             vm_tf->tf_guest_pcoreid);
+       printk("Flags  0x%08x\n",             vm_tf->tf_flags);
+       printk("Inject 0x%08x\n",             vm_tf->tf_trap_inject);
+       printk("ExitRs 0x%08x\n",             vm_tf->tf_exit_reason);
+       printk("ExitQl 0x%08x\n",             vm_tf->tf_exit_qual);
+       printk("Intr1  0x%016lx\n",           vm_tf->tf_intrinfo1);
+       printk("Intr2  0x%016lx\n",           vm_tf->tf_intrinfo2);
+       printk("GVA    0x%016lx\n",           vm_tf->tf_guest_va);
+       printk("GPA    0x%016lx\n",           vm_tf->tf_guest_pa);
+       spin_unlock_irqsave(&ptf_lock);
+       pcpui->__lock_checking_enabled++;
+}
+
 void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
                               unsigned int err, unsigned long aux)
 {
index bda6ad8..c11d630 100644 (file)
@@ -13,8 +13,6 @@
 #error "Do not include arch/trap64.h directly."
 #endif
 
-void print_swtrapframe(struct sw_trapframe *sw_tf);
-
 static inline bool in_kernel(struct hw_trapframe *hw_tf)
 {
        return (hw_tf->tf_cs & ~3) == GD_KT;
@@ -40,6 +38,16 @@ static inline uintptr_t get_swtf_fp(struct sw_trapframe *sw_tf)
        return sw_tf->tf_rbp;
 }
 
+static inline uintptr_t get_vmtf_pc(struct vm_trapframe *vm_tf)
+{
+       return vm_tf->tf_rip;
+}
+
+static inline uintptr_t get_vmtf_fp(struct vm_trapframe *vm_tf)
+{
+       return vm_tf->tf_rbp;
+}
+
 static inline uintptr_t x86_get_ip_hw(struct hw_trapframe *hw_tf)
 {
        return hw_tf->tf_rip;
@@ -119,6 +127,11 @@ static inline bool x86_swtf_is_partial(struct sw_trapframe *tf)
        return tf->tf_padding0 == 1;
 }
 
+static inline bool x86_vmtf_is_partial(struct vm_trapframe *tf)
+{
+       return tf->tf_flags & VMCTX_FL_PARTIAL ? TRUE : FALSE;
+}
+
 static inline void x86_hwtf_clear_partial(struct hw_trapframe *tf)
 {
        tf->tf_padding0 = 0;
@@ -129,13 +142,20 @@ static inline void x86_swtf_clear_partial(struct sw_trapframe *tf)
        tf->tf_padding0 = 0;
 }
 
+static inline void x86_vmtf_clear_partial(struct vm_trapframe *tf)
+{
+       tf->tf_flags &= ~VMCTX_FL_PARTIAL;
+}
+
 static inline bool arch_ctx_is_partial(struct user_context *ctx)
 {
        switch (ctx->type) {
-       case (ROS_HW_CTX):
+       case ROS_HW_CTX:
                return x86_hwtf_is_partial(&ctx->tf.hw_tf);
-       case (ROS_SW_CTX):
+       case ROS_SW_CTX:
                return x86_swtf_is_partial(&ctx->tf.sw_tf);
+       case ROS_VM_CTX:
+               return x86_vmtf_is_partial(&ctx->tf.vm_tf);
        }
        return FALSE;
 }
@@ -161,6 +181,13 @@ static inline void x86_finalize_swtf(struct sw_trapframe *tf)
        x86_swtf_clear_partial(tf);
 }
 
+static inline void x86_finalize_vmtf(struct vm_trapframe *tf)
+{
+       x86_vmtf_clear_partial(tf);
+       /* TODO: (VMCTX) */
+       panic("Not implemented");
+}
+
 /* Makes sure that the user context is fully saved into ctx and not split across
  * the struct and HW, meaning it is not a "partial context". */
 static inline void arch_finalize_ctx(struct user_context *ctx)
@@ -168,11 +195,14 @@ static inline void arch_finalize_ctx(struct user_context *ctx)
        if (!arch_ctx_is_partial(ctx))
                return;
        switch (ctx->type) {
-       case (ROS_HW_CTX):
+       case ROS_HW_CTX:
                x86_finalize_hwtf(&ctx->tf.hw_tf);
                break;
-       case (ROS_SW_CTX):
+       case ROS_SW_CTX:
                x86_finalize_swtf(&ctx->tf.sw_tf);
                break;
+       case ROS_VM_CTX:
+               x86_finalize_vmtf(&ctx->tf.vm_tf);
+               break;
        }
 }
index 319174c..9b03d8e 100644 (file)
@@ -6,13 +6,15 @@
 
 #define ROS_HW_CTX                             1
 #define ROS_SW_CTX                             2
+#define ROS_VM_CTX                             3
 
-/* User-space context, either from a hardware event (IRQ, trap, etc), or from a
- * syscall.  Each arch defines its types. */
+/* User-space context, either from a hardware event (IRQ, trap, etc), from a
+ * syscall, or virtual machine.  Each arch defines its types. */
 struct user_context {
        int                                                     type;
        union {
                struct hw_trapframe             hw_tf;
                struct sw_trapframe             sw_tf;
+               struct vm_trapframe             vm_tf;
        } tf;
 };
index 118c81d..b5f8d3c 100644 (file)
@@ -17,6 +17,8 @@ void idt_init(void);
 int register_irq(int irq, isr_t handler, void *irq_arg, uint32_t tbdf);
 int route_irqs(int cpu_vec, int coreid);
 void print_trapframe(struct hw_trapframe *hw_tf);
+void print_swtrapframe(struct sw_trapframe *sw_tf);
+void print_vmtrapframe(struct vm_trapframe *vm_tf);
 void print_user_ctx(struct user_context *ctx);
 /* Generic per-core timer interrupt handler.  set_percore_timer() will fire the
  * timer_interrupt(). */
index e3eabfe..4946ea0 100644 (file)
@@ -79,26 +79,28 @@ error_out:
 uintptr_t get_user_ctx_pc(struct user_context *ctx)
 {
        switch (ctx->type) {
-               case ROS_HW_CTX:
-                       return get_hwtf_pc(&ctx->tf.hw_tf);
-               case ROS_SW_CTX:
-                       return get_swtf_pc(&ctx->tf.sw_tf);
-               default:
-                       warn("Bad context type %d for ctx %p\n", ctx->type, ctx);
-                       return 0;
+       case ROS_HW_CTX:
+               return get_hwtf_pc(&ctx->tf.hw_tf);
+       case ROS_SW_CTX:
+               return get_swtf_pc(&ctx->tf.sw_tf);
+       case ROS_VM_CTX:
+               return get_vmtf_pc(&ctx->tf.vm_tf);
+       default:
+               panic("Bad context type %d for ctx %p\n", ctx->type, ctx);
        }
 }
 
 uintptr_t get_user_ctx_fp(struct user_context *ctx)
 {
        switch (ctx->type) {
-               case ROS_HW_CTX:
-                       return get_hwtf_fp(&ctx->tf.hw_tf);
-               case ROS_SW_CTX:
-                       return get_swtf_fp(&ctx->tf.sw_tf);
-               default:
-                       warn("Bad context type %d for ctx %p\n", ctx->type, ctx);
-                       return 0;
+       case ROS_HW_CTX:
+               return get_hwtf_fp(&ctx->tf.hw_tf);
+       case ROS_SW_CTX:
+               return get_swtf_fp(&ctx->tf.sw_tf);
+       case ROS_VM_CTX:
+               return get_vmtf_fp(&ctx->tf.vm_tf);
+       default:
+               panic("Bad context type %d for ctx %p\n", ctx->type, ctx);
        }
 }
 
@@ -333,10 +335,17 @@ void print_kctx_depths(const char *str)
 
 void print_user_ctx(struct user_context *ctx)
 {
-       if (ctx->type == ROS_SW_CTX)
-               print_swtrapframe(&ctx->tf.sw_tf);
-       else if (ctx->type == ROS_HW_CTX)
+       switch (ctx->type) {
+       case ROS_HW_CTX:
                print_trapframe(&ctx->tf.hw_tf);
-       else
+               break;
+       case ROS_SW_CTX:
+               print_swtrapframe(&ctx->tf.sw_tf);
+               break;
+       case ROS_VM_CTX:
+               print_vmtrapframe(&ctx->tf.vm_tf);
+               break;
+       default:
                printk("Bad TF %p type %d!\n", ctx, ctx->type);
+       }
 }