-#ifdef __DEPUTY__
-#pragma noasync
-#endif
-
-#include <arch/mmu.h>
-#include <arch/x86.h>
-#include <arch/console.h>
-#include <arch/apic.h>
-#include <arch/smp.h>
-#include <assert.h>
-#include <pmap.h>
+/* Copyright (c) 2012 The Regents of the University of California
+ * Barret Rhoden <brho@cs.berkeley.edu>
+ * See LICENSE for details.
+ *
+ * Arch-independent trap handling and kernel messaging */
+
+#include <arch/arch.h>
+#include <smp.h>
#include <trap.h>
-#include <monitor.h>
-#include <process.h>
-
-#include <syscall.h>
-
-taskstate_t ts;
-
-/* Interrupt descriptor table. (Must be built at run time because
- * shifted function addresses can't be represented in relocation records.)
- */
-// Aligned on an 8 byte boundary (SDM V3A 5-13)
-gatedesc_t __attribute__ ((aligned (8))) idt[256] = { { 0 } };
-pseudodesc_t idt_pd = {
- sizeof(idt) - 1, (uint32_t) idt
-};
-
-/* global handler table, used by core0 (for now). allows the registration
- * of functions to be called when servicing an interrupt. other cores
- * can set up their own later.
- */
-handler_t interrupt_handlers[256];
+#include <stdio.h>
+#include <slab.h>
+#include <assert.h>
+#include <kdebug.h>
+#include <kmalloc.h>
-static const char *NTS (IN_HANDLER trapname)(int trapno)
+static void print_unhandled_trap(struct proc *p, struct user_context *ctx,
+ unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
{
- // zra: excnames is NORACE because Ivy doesn't trust const
- static const char *NT const (NORACE excnames)[] = {
- "Divide error",
- "Debug",
- "Non-Maskable Interrupt",
- "Breakpoint",
- "Overflow",
- "BOUND Range Exceeded",
- "Invalid Opcode",
- "Device Not Available",
- "Double Fault",
- "Coprocessor Segment Overrun",
- "Invalid TSS",
- "Segment Not Present",
- "Stack Fault",
- "General Protection",
- "Page Fault",
- "(unknown trap)",
- "x87 FPU Floating-Point Error",
- "Alignment Check",
- "Machine-Check",
- "SIMD Floating-Point Exception"
- };
-
- if (trapno < sizeof(excnames)/sizeof(excnames[0]))
- return excnames[trapno];
- if (trapno == T_SYSCALL)
- return "System call";
- return "(unknown trap)";
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ uint32_t vcoreid = pcpui->owning_vcoreid;
+ struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
+ static spinlock_t print_trap_lock = SPINLOCK_INITIALIZER;
+
+ spin_lock(&print_trap_lock);
+ if (!proc_is_vcctx_ready(p))
+ printk("Unhandled user trap from early SCP\n");
+ else if (vcpd->notif_disabled)
+ printk("Unhandled user trap in vcore context from VC %d\n", vcoreid);
+ print_user_ctx(ctx);
+ printk("err 0x%x (for PFs: User 4, Wr 2, Rd 1), aux %p\n", err, aux);
+ debug_addr_proc(p, get_user_ctx_pc(ctx));
+ print_vmrs(p);
+ backtrace_user_ctx(p, ctx);
+ spin_unlock(&print_trap_lock);
}
-
-void
-idt_init(void)
+/* Traps that are considered normal operations, such as page faults on
+ * VMR-backed memory. */
+static bool benign_trap(unsigned int err)
{
- extern segdesc_t gdt[];
-
- // This table is made in trapentry.S by each macro in that file.
- // It is layed out such that the ith entry is the ith's traphandler's
- // (uint32_t) trap addr, then (uint32_t) trap number
- struct trapinfo { uint32_t trapaddr; uint32_t trapnumber; };
- extern struct trapinfo (BND(__this,trap_tbl_end) trap_tbl)[];
- extern struct trapinfo (SNT trap_tbl_end)[];
- int i, trap_tbl_size = trap_tbl_end - trap_tbl;
- extern void ISR_default(void);
-
- // set all to default, to catch everything
- for(i = 0; i < 256; i++)
- SETGATE(idt[i], 0, GD_KT, &ISR_default, 0);
-
- // set all entries that have real trap handlers
- // we need to stop short of the last one, since the last is the default
- // handler with a fake interrupt number (500) that is out of bounds of
- // the idt[]
- // if we set these to trap gates, be sure to handle the IRQs separately
- // and we might need to break our pretty tables
- for(i = 0; i < trap_tbl_size - 1; i++)
- SETGATE(idt[trap_tbl[i].trapnumber], 0, GD_KT, trap_tbl[i].trapaddr, 0);
-
- // turn on syscall handling and other user-accessible ints
- // DPL 3 means this can be triggered by the int instruction
- // STS_TG32 sets the IDT type to a Trap Gate (interrupts enabled)
- idt[T_SYSCALL].gd_dpl = 3;
- idt[T_SYSCALL].gd_type = STS_TG32;
- idt[T_BRKPT].gd_dpl = 3;
-
- // Setup a TSS so that we get the right stack
- // when we trap to the kernel.
- ts.ts_esp0 = KSTACKTOP;
- ts.ts_ss0 = GD_KD;
-
- // Initialize the TSS field of the gdt.
- gdt[GD_TSS >> 3] = SEG16(STS_T32A, (uint32_t) (&ts),
- sizeof(taskstate_t), 0);
- gdt[GD_TSS >> 3].sd_s = 0;
+ return err & PF_VMR_BACKED;
+}
- // Load the TSS
- ltr(GD_TSS);
+static void printx_unhandled_trap(struct proc *p, struct user_context *ctx,
+ unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
+{
+ if (printx_on && !benign_trap(err))
+ print_unhandled_trap(p, ctx, trap_nr, err, aux);
+}
- // Load the IDT
- asm volatile("lidt idt_pd");
+/* Helper, reflects the current context back to the 2LS. Returns 0 on success,
+ * -1 on failure. */
+int reflect_current_context(void)
+{
+ uint32_t coreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+ struct proc *p = pcpui->cur_proc;
+ uint32_t vcoreid = pcpui->owning_vcoreid;
+ struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
+
+ assert(pcpui->cur_proc == pcpui->owning_proc);
+ if (!proc_is_vcctx_ready(p))
+ return -1;
+ if (vcpd->notif_disabled)
+ return -1;
+ /* the guts of a __notify */
+ vcpd->notif_disabled = TRUE;
+ copy_current_ctx_to(&vcpd->uthread_ctx);
+ memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
+ proc_init_ctx(pcpui->cur_ctx, vcoreid, vcpd->vcore_entry,
+ vcpd->vcore_stack, vcpd->vcore_tls_desc);
+ return 0;
+}
- // This will go away when we start using the IOAPIC properly
- pic_remap();
- // set LINT0 to receive ExtINTs (KVM's default). At reset they are 0x1000.
- write_mmreg32(LAPIC_LVT_LINT0, 0x700);
- // mask it to shut it up for now
- mask_lapic_lvt(LAPIC_LVT_LINT0);
- // and turn it on
- lapic_enable();
+void reflect_unhandled_trap(unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
+{
+ uint32_t coreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+ struct proc *p = pcpui->cur_proc;
+
+ assert(p);
+ assert(pcpui->cur_ctx && (pcpui->cur_ctx->type == ROS_HW_CTX));
+ /* need to store trap_nr, err code, and aux into the tf so that it can get
+ * extracted on the other end, and we need to flag the TF in some way so we
+ * can tell it was reflected. for example, on a PF, we need some number (14
+ * on x86), the prot violation (write, read, etc), and the virt addr (aux).
+ * parlib will know how to extract this info. */
+ __arch_reflect_trap_hwtf(&pcpui->cur_ctx->tf.hw_tf, trap_nr, err, aux);
+ printx_unhandled_trap(p, pcpui->cur_ctx, trap_nr, err, aux);
+ if (reflect_current_context()) {
+ print_unhandled_trap(p, pcpui->cur_ctx, trap_nr, err, aux);
+ proc_destroy(p);
+ }
}
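+
+/* Usage sketch (illustrative, not part of this patch): an arch page fault
+ * handler that can't satisfy a user fault could reflect it with the PF
+ * vector and the faulting address as aux, per the comment above. fault_va
+ * is a hypothetical name; 14 is the x86 PF vector:
+ *
+ *	reflect_unhandled_trap(14, err, fault_va);
+ */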
-void
-(IN_HANDLER print_trapframe)(trapframe_t *tf)
+/* Helper, copies the current context to to_ctx. */
+void copy_current_ctx_to(struct user_context *to_ctx)
{
- cprintf("TRAP frame at %p on core %d\n", tf, coreid());
- print_regs(&tf->tf_regs);
- cprintf(" es 0x----%04x\n", tf->tf_es);
- cprintf(" ds 0x----%04x\n", tf->tf_ds);
- cprintf(" trap 0x%08x %s\n", tf->tf_trapno, trapname(tf->tf_trapno));
- cprintf(" err 0x%08x\n", tf->tf_err);
- cprintf(" eip 0x%08x\n", tf->tf_eip);
- cprintf(" cs 0x----%04x\n", tf->tf_cs);
- cprintf(" flag 0x%08x\n", tf->tf_eflags);
- cprintf(" esp 0x%08x\n", tf->tf_esp);
- cprintf(" ss 0x----%04x\n", tf->tf_ss);
+ struct user_context *cur_ctx = current_ctx;
+
+ /* Be sure to finalize into cur_ctx, not the to_ctx; otherwise the arch
+ * could get confused by other calls to finalize. */
+ arch_finalize_ctx(cur_ctx);
+ *to_ctx = *cur_ctx;
}
-void
-(IN_HANDLER print_regs)(push_regs_t *regs)
+struct kmem_cache *kernel_msg_cache;
+
+void kernel_msg_init(void)
{
- cprintf(" edi 0x%08x\n", regs->reg_edi);
- cprintf(" esi 0x%08x\n", regs->reg_esi);
- cprintf(" ebp 0x%08x\n", regs->reg_ebp);
- cprintf(" oesp 0x%08x\n", regs->reg_oesp);
- cprintf(" ebx 0x%08x\n", regs->reg_ebx);
- cprintf(" edx 0x%08x\n", regs->reg_edx);
- cprintf(" ecx 0x%08x\n", regs->reg_ecx);
- cprintf(" eax 0x%08x\n", regs->reg_eax);
+ kernel_msg_cache = kmem_cache_create("kernel_msgs",
+ sizeof(struct kernel_message),
+ ARCH_CL_SIZE, 0, NULL, 0, 0, NULL);
}
-static void
-(IN_HANDLER trap_dispatch)(trapframe_t *tf)
+uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
+ long arg2, int type)
{
- // Handle processor exceptions.
- switch(tf->tf_trapno) {
- case T_BRKPT:
- while (1)
- monitor(tf);
- // never get to this
- assert(0);
- case T_PGFLT:
- page_fault_handler(tf);
+ kernel_message_t *k_msg;
+ assert(pc);
+ // note this will be freed on the destination core
+ k_msg = kmem_cache_alloc(kernel_msg_cache, 0);
+ k_msg->srcid = core_id();
+ k_msg->dstid = dst;
+ k_msg->pc = pc;
+ k_msg->arg0 = arg0;
+ k_msg->arg1 = arg1;
+ k_msg->arg2 = arg2;
+ switch (type) {
+ case KMSG_IMMEDIATE:
+ spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
+ STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
+ spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
break;
- case T_SYSCALL:
- // check for userspace, for now
- assert(tf->tf_cs != GD_KT);
- tf->tf_regs.reg_eax =
- syscall(current, tf->tf_regs.reg_eax, tf->tf_regs.reg_edx,
- tf->tf_regs.reg_ecx, tf->tf_regs.reg_ebx,
- tf->tf_regs.reg_edi, tf->tf_regs.reg_esi);
- proc_startcore(current, tf); // Note the comment in syscall.c
+ case KMSG_ROUTINE:
+ spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
+ STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
+ spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
break;
default:
- // Unexpected trap: The user process or the kernel has a bug.
- print_trapframe(tf);
- if (tf->tf_cs == GD_KT)
- panic("Damn Damn! Unhandled trap in the kernel!");
- else {
- warn("Unexpected trap from userspace");
- env_destroy(current);
- return;
- }
+ panic("Unknown type of kernel message!");
}
- return;
+ /* since we touched memory the other core will touch (the lock), we don't
+ * need a wmb_f() */
+ /* if we're sending a routine message locally, we don't want/need an IPI */
+ if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
+ send_ipi(dst, I_KERNEL_MSG);
+ return 0;
}
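+
+/* Usage sketch: handlers have the amr_t signature used by the kmsg dispatch
+ * code below (srcid, arg0, arg1, arg2). __example_amsg and target_core are
+ * hypothetical names:
+ *
+ *	static void __example_amsg(uint32_t srcid, long a0, long a1, long a2)
+ *	{
+ *		printk("kmsg from core %d: %ld %ld %ld\n", srcid, a0, a1, a2);
+ *	}
+ *
+ *	send_kernel_message(target_core, __example_amsg, 1, 2, 3, KMSG_ROUTINE);
+ */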
-void
-(IN_HANDLER trap)(trapframe_t *tf)
+/* Kernel message IPI/IRQ handler.
+ *
+ * This processes immediate messages, and that's it (it used to handle routines
+ * too, if it came in from userspace). Routine messages will get processed when
+ * the kernel has a chance (right before popping to userspace or in smp_idle
+ * before halting).
+ *
+ * Note that all of this happens from interrupt context, and interrupts are
+ * disabled. */
+void handle_kmsg_ipi(struct hw_trapframe *hw_tf, void *data)
{
- //cprintf("Incoming TRAP frame at %p\n", tf);
-
- if ((tf->tf_cs & ~3) != GD_UT && (tf->tf_cs & ~3) != GD_KT) {
- print_trapframe(tf);
- panic("Trapframe with invalid CS!");
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ struct kernel_message *kmsg_i, *temp;
+ /* Avoid locking if the list appears empty (lockless peek is okay) */
+ if (STAILQ_EMPTY(&pcpui->immed_amsgs))
+ return;
+ /* The lock serves as a cmb to force a re-read of the head of the list */
+ spin_lock_irqsave(&pcpui->immed_amsg_lock);
+ STAILQ_FOREACH_SAFE(kmsg_i, &pcpui->immed_amsgs, link, temp) {
+ pcpui_trace_kmsg(pcpui, (uintptr_t)kmsg_i->pc);
+ kmsg_i->pc(kmsg_i->srcid, kmsg_i->arg0, kmsg_i->arg1, kmsg_i->arg2);
+ STAILQ_REMOVE(&pcpui->immed_amsgs, kmsg_i, kernel_message, link);
+ kmem_cache_free(kernel_msg_cache, (void*)kmsg_i);
}
-
- if ((tf->tf_cs & 3) == 3) {
- // Trapped from user mode.
- // TODO: this will change when an env has more than one context
- // Copy trap frame (which is currently on the stack)
- // into 'current->env_tf', so that running the environment
- // will restart at the trap point.
- assert(current);
- current->env_tf = *tf;
- // The trapframe on the stack should be ignored from here on.
- tf = &current->env_tf;
- }
-
- // Dispatch based on what type of trap occurred
- trap_dispatch(tf);
-
- // should this be if == 3? Sort out later when we handle traps.
- // so far we never get here
- assert(0);
- // Return to the current environment, which should be runnable.
- proc_startcore(current, tf); // Note the comment in syscall.c
+ spin_unlock_irqsave(&pcpui->immed_amsg_lock);
}
-void
-(IN_HANDLER irq_handler)(trapframe_t *tf)
+bool has_routine_kmsg(void)
{
- //if (coreid())
- // cprintf("Incoming IRQ, ISR: %d on core %d\n", tf->tf_trapno, coreid());
- // merge this with alltraps? other than the EOI... or do the same in all traps
-
- extern handler_wrapper_t handler_wrappers[NUM_HANDLER_WRAPPERS];
-
- // determine the interrupt handler table to use. for now, pick the global
- handler_t* handler_tbl = interrupt_handlers;
-
- if (handler_tbl[tf->tf_trapno].isr != 0)
- handler_tbl[tf->tf_trapno].isr(tf, handler_tbl[tf->tf_trapno].data);
- // if we're a general purpose IPI function call, down the cpu_list
- if ((0xf0 <= tf->tf_trapno) && (tf->tf_trapno < 0xf0 +NUM_HANDLER_WRAPPERS))
- down_checklist(handler_wrappers[tf->tf_trapno & 0x0f].cpu_list);
-
- // Send EOI. might want to do this in assembly, and possibly earlier
- // This is set up to work with an old PIC for now
- // Convention is that all IRQs between 32 and 47 are for the PIC.
- // All others are LAPIC (timer, IPIs, perf, non-ExtINT LINTS, etc)
- // For now, only 235-255 are available
- assert(tf->tf_trapno >= 32); // slows us down, but we should never have this
- if (tf->tf_trapno < 48)
- pic_send_eoi(tf->tf_trapno - PIC1_OFFSET);
- else
- lapic_send_eoi();
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ /* lockless peek */
+ return !STAILQ_EMPTY(&pcpui->routine_amsgs);
}
-void
-register_interrupt_handler(handler_t table[], uint8_t int_num, isr_t handler,
- void* data)
+/* Helper function, gets the next routine KMSG (RKM). Returns 0 if the list was
+ * empty. */
+static kernel_message_t *get_next_rkmsg(struct per_cpu_info *pcpui)
{
- table[int_num].isr = handler;
- table[int_num].data = data;
+ struct kernel_message *kmsg;
+ /* Avoid locking if the list appears empty (lockless peek is okay) */
+ if (STAILQ_EMPTY(&pcpui->routine_amsgs))
+ return 0;
+ /* The lock serves as a cmb to force a re-read of the head of the list.
+ * IRQs are disabled by our caller. */
+ spin_lock(&pcpui->routine_amsg_lock);
+ kmsg = STAILQ_FIRST(&pcpui->routine_amsgs);
+ if (kmsg)
+ STAILQ_REMOVE_HEAD(&pcpui->routine_amsgs, link);
+ spin_unlock(&pcpui->routine_amsg_lock);
+ return kmsg;
}
-void
-page_fault_handler(trapframe_t *tf)
+/* Runs routine kernel messages. This might not return. In the past, this
+ * would also run immediate messages, but this is unnecessary. Immediates will
+ * run whenever we reenable IRQs. We could have some sort of ordering or
+ * guarantees between KMSG classes, but that's not particularly useful at this
+ * point.
+ *
+ * Note this runs from normal context, with interrupts disabled. However, a
+ * particular RKM could enable interrupts - for instance __launch_kthread() will
+ * restore an old kthread that may have had IRQs on. */
+void process_routine_kmsg(void)
{
- uint32_t fault_va;
-
- // Read processor's CR2 register to find the faulting address
- fault_va = rcr2();
-
- // Handle kernel-mode page faults.
+ uint32_t pcoreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[pcoreid];
+ struct kernel_message msg_cp, *kmsg;
+
+ /* Important that callers have IRQs disabled. When sending cross-core RKMs,
+ * the IPI is used to keep the core from going to sleep - even though RKMs
+ * aren't handled in the kmsg handler. Check smp_idle() for more info. */
+ assert(!irq_is_enabled());
+ while ((kmsg = get_next_rkmsg(pcpui))) {
+ /* Copy in, and then free, in case we don't return */
+ msg_cp = *kmsg;
+ kmem_cache_free(kernel_msg_cache, (void*)kmsg);
+ assert(msg_cp.dstid == pcoreid); /* caught a brutal bug with this */
+ set_rkmsg(pcpui); /* we're now in early RKM ctx */
+ /* The kmsg could block. If it does, we want the kthread code to know
+ * it's not running on behalf of a process, and we're actually spawning
+ * a kernel task. While we do have a syscall that does work in an RKM
+ * (change_to), it's not really the rest of the syscall context. */
+ pcpui->cur_kthread->flags = KTH_KTASK_FLAGS;
+ pcpui_trace_kmsg(pcpui, (uintptr_t)msg_cp.pc);
+ msg_cp.pc(msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
+ /* And if we make it back, be sure to restore the default flags. If we
+ * never return, but the kthread exits via some other way (smp_idle()),
+ * then smp_idle() will deal with the flags. The default state includes
+ * 'not a ktask'. */
+ pcpui->cur_kthread->flags = KTH_DEFAULT_FLAGS;
+ /* If we aren't still in early RKM, it is because the KMSG blocked
+ * (thus leaving early RKM, finishing in default context) and then
+ * returned. This is a 'detached' RKM. Must idle in this scenario,
+ * since we might have migrated or otherwise weren't meant to PRKM
+ * (can't return twice). Also note that this may involve a core
+ * migration, so we need to reread pcpui. */
+ cmb();
+ pcpui = &per_cpu_info[core_id()];
+ if (!in_early_rkmsg_ctx(pcpui))
+ smp_idle();
+ clear_rkmsg(pcpui);
+ /* Some RKMs might turn on interrupts (perhaps in the future) and then
+ * return. */
+ disable_irq();
+ }
+}
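+
+/* Caller-side sketch: per the assert above, PRKM must be entered with IRQs
+ * disabled. A hypothetical call site (e.g. in an idle loop) looks like:
+ *
+ *	disable_irq();
+ *	process_routine_kmsg();
+ */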
- // TODO - one day, we'll want to handle this.
- if ((tf->tf_cs & 3) == 0) {
- print_trapframe(tf);
- panic("Page Fault in the Kernel at 0x%08x!", fault_va);
+/* extremely dangerous and racy: prints out the immed and routine kmsgs for a
+ * specific core (so possibly remotely) */
+void print_kmsgs(uint32_t coreid)
+{
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+ void __print_kmsgs(struct kernel_msg_list *list, char *type)
+ {
+ char *fn_name;
+ struct kernel_message *kmsg_i;
+ STAILQ_FOREACH(kmsg_i, list, link) {
+ fn_name = get_fn_name((long)kmsg_i->pc);
+ printk("%s KMSG on %d from %d to run %p(%s)(%p, %p, %p)\n", type,
+ kmsg_i->dstid, kmsg_i->srcid, kmsg_i->pc, fn_name,
+ kmsg_i->arg0, kmsg_i->arg1, kmsg_i->arg2);
+ kfree(fn_name);
+ }
}
+ __print_kmsgs(&pcpui->immed_amsgs, "Immediate");
+ __print_kmsgs(&pcpui->routine_amsgs, "Routine");
+}
- // We've already handled kernel-mode exceptions, so if we get here,
- // the page fault happened in user mode.
+/* Debugging stuff */
+void kmsg_queue_stat(void)
+{
+ struct kernel_message *kmsg;
+ bool immed_emp, routine_emp;
+ for (int i = 0; i < num_cores; i++) {
+ spin_lock_irqsave(&per_cpu_info[i].immed_amsg_lock);
+ immed_emp = STAILQ_EMPTY(&per_cpu_info[i].immed_amsgs);
+ spin_unlock_irqsave(&per_cpu_info[i].immed_amsg_lock);
+ spin_lock_irqsave(&per_cpu_info[i].routine_amsg_lock);
+ routine_emp = STAILQ_EMPTY(&per_cpu_info[i].routine_amsgs);
+ spin_unlock_irqsave(&per_cpu_info[i].routine_amsg_lock);
+ printk("Core %d's immed_emp: %d, routine_emp %d\n", i, immed_emp,
+ routine_emp);
+ if (!immed_emp) {
+ kmsg = STAILQ_FIRST(&per_cpu_info[i].immed_amsgs);
+ printk("Immed msg on core %d:\n", i);
+ printk("\tsrc: %d\n", kmsg->srcid);
+ printk("\tdst: %d\n", kmsg->dstid);
+ printk("\tpc: %p\n", kmsg->pc);
+ printk("\targ0: %p\n", kmsg->arg0);
+ printk("\targ1: %p\n", kmsg->arg1);
+ printk("\targ2: %p\n", kmsg->arg2);
+ }
+ if (!routine_emp) {
+ kmsg = STAILQ_FIRST(&per_cpu_info[i].routine_amsgs);
+ printk("Routine msg on core %d:\n", i);
+ printk("\tsrc: %d\n", kmsg->srcid);
+ printk("\tdst: %d\n", kmsg->dstid);
+ printk("\tpc: %p\n", kmsg->pc);
+ printk("\targ0: %p\n", kmsg->arg0);
+ printk("\targ1: %p\n", kmsg->arg1);
+ printk("\targ2: %p\n", kmsg->arg2);
+ }
- // Call the environment's page fault upcall, if one exists. Set up a
- // page fault stack frame on the user exception stack (below
- // UXSTACKTOP), then branch to current->env_pgfault_upcall.
- //
- // The page fault upcall might cause another page fault, in which case
- // we branch to the page fault upcall recursively, pushing another
- // page fault stack frame on top of the user exception stack.
- //
- // The trap handler needs one word of scratch space at the top of the
- // trap-time stack in order to return. In the non-recursive case, we
- // don't have to worry about this because the top of the regular user
- // stack is free. In the recursive case, this means we have to leave
- // an extra word between the current top of the exception stack and
- // the new stack frame because the exception stack _is_ the trap-time
- // stack.
- //
- // If there's no page fault upcall, the environment didn't allocate a
- // page for its exception stack, or the exception stack overflows,
- // then destroy the environment that caused the fault.
- //
- // Hints:
- // user_mem_assert() and env_run() are useful here.
- // To change what the user environment runs, modify 'current->env_tf'
- // (the 'tf' variable points at 'current->env_tf').
+ }
+}
- // LAB 4: Your code here.
+void print_kctx_depths(const char *str)
+{
+ uint32_t coreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
- // Destroy the environment that caused the fault.
- cprintf("[%08x] user fault va %08x ip %08x from core %d\n",
- current->env_id, fault_va, tf->tf_eip, coreid());
- print_trapframe(tf);
- env_destroy(current);
+ if (!str)
+ str = "(none)";
+ printk("%s: Core %d, irq depth %d, ktrap depth %d, irqon %d\n", str, coreid,
+ irq_depth(pcpui), ktrap_depth(pcpui), irq_is_enabled());
}
-void sysenter_init(void)
+void print_user_ctx(struct user_context *ctx)
{
- write_msr(MSR_IA32_SYSENTER_CS, GD_KT);
- write_msr(MSR_IA32_SYSENTER_ESP, ts.ts_esp0);
- write_msr(MSR_IA32_SYSENTER_EIP, (uint32_t) &sysenter_handler);
+ switch (ctx->type) {
+ case ROS_HW_CTX:
+ print_trapframe(&ctx->tf.hw_tf);
+ break;
+ case ROS_SW_CTX:
+ print_swtrapframe(&ctx->tf.sw_tf);
+ break;
+ case ROS_VM_CTX:
+ print_vmtrapframe(&ctx->tf.vm_tf);
+ break;
+ default:
+ printk("Bad TF %p type %d!\n", ctx, ctx->type);
+ }
}