return "(unknown trap)";
}
+/* Set stacktop for the current core to be the stack the kernel will start on
+ * when trapping/interrupting from userspace. Don't use this until after
+ * smp_percpu_init(). We can probably get the TSS by reading the task register
+ * and then the GDT. Still, it's a pain. */
+void set_stack_top(uintptr_t stacktop)
+{
+ struct per_cpu_info *pcpu = &per_cpu_info[core_id()];
+ /* No need to reload the task register, this takes effect immediately */
+ pcpu->tss->ts_esp0 = stacktop;
+ /* Also need to make sure sysenters come in correctly; the CPU takes its
+ * kernel esp from this MSR on a sysenter */
+ write_msr(MSR_IA32_SYSENTER_ESP, stacktop);
+}
-void
-idt_init(void)
+/* Returns the kernel stacktop for the current core (the TSS's esp0), or a
+ * page-rounded esp before the TSS is set up.  Note the check below implies we
+ * only are on a one page stack (or at least within its first page). */
+uintptr_t get_stack_top(void)
+{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ uintptr_t stacktop;
+ /* so we can check this in interrupt handlers (before smp_boot()) */
+ if (!pcpui->tss)
+ return ROUNDUP(read_esp(), PGSIZE);
+ stacktop = pcpui->tss->ts_esp0;
+ /* sanity check: esp should be within the top page of this core's stack */
+ if (stacktop != ROUNDUP(read_esp(), PGSIZE))
+ panic("Bad stacktop: %08p esp one is %08p\n", stacktop,
+ ROUNDUP(read_esp(), PGSIZE));
+ return stacktop;
+}
+
+/* Starts running the current TF, just using ret.  Restores the GP registers,
+ * switches to tf->tf_esp, and returns to tf->tf_eip.  Does not return. */
+void pop_kernel_tf(struct trapframe *tf)
+{
+ asm volatile ("movl %1,%%esp; " /* move to future stack */
+ "pushl %2; " /* push return eip on future stack */
+ "movl %0,%%esp; " /* move to TF */
+ "addl $0x20,%%esp; " /* move to tf_gs slot */
+ "movl %1,(%%esp); " /* write future esp */
+ "subl $0x20,%%esp; " /* move back to tf start */
+ "popal; " /* restore regs */
+ "popl %%esp; " /* set stack ptr */
+ "subl $0x4,%%esp; " /* move down to the pushed eip */
+ "ret " /* return to the EIP */
+ :
+ : "g"(tf), "r"(tf->tf_esp), "r"(tf->tf_eip) : "memory");
+ panic("ret failed"); /* should never get back here */
+}
+
+void idt_init(void)
{
extern segdesc_t (RO gdt)[];
idt[T_SYSCALL].gd_type = SINIT(STS_TG32);
idt[T_BRKPT].gd_dpl = SINIT(3);
- // Setup a TSS so that we get the right stack
- // when we trap to the kernel.
- ts.ts_esp0 = SINIT(KSTACKTOP);
+ /* Setup a TSS so that we get the right stack when we trap to the kernel.
+ * We need to use the KVA for stacktop, and not the memlayout virtual
+ * address, so we can free it later (and check for other bugs). */
+ pte_t *pte = pgdir_walk(boot_pgdir, (void*)KSTACKTOP - PGSIZE, 0);
+ /* NOTE(review): assumes the page below KSTACKTOP is already mapped; pte is
+ * dereferenced without a NULL check -- confirm pgdir_walk cannot fail here */
+ uintptr_t stacktop_kva = (uintptr_t)ppn2kva(PTE2PPN(*pte)) + PGSIZE;
+ ts.ts_esp0 = stacktop_kva;
ts.ts_ss0 = SINIT(GD_KD);
// Initialize the TSS field of the gdt.
mask_lapic_lvt(LAPIC_LVT_LINT0);
// and turn it on
lapic_enable();
+ /* register the generic timer_interrupt() handler for the per-core timers */
+ register_interrupt_handler(interrupt_handlers, LAPIC_TIMER_DEFAULT_VECTOR,
+ timer_interrupt, NULL);
}
void
case T_SYSCALL:
// check for userspace, for now
assert(tf->tf_cs != GD_KT);
-
- // syscall code wants an edible reference for current
- proc_incref(current, 1);
- tf->tf_regs.reg_eax =
- syscall(current, tf->tf_regs.reg_eax, tf->tf_regs.reg_edx,
- tf->tf_regs.reg_ecx, tf->tf_regs.reg_ebx,
- tf->tf_regs.reg_edi, tf->tf_regs.reg_esi);
- proc_decref(current, 1);
+ /* Set up and run the async calls.  reg_eax now carries a pointer to the
+ * syscall struct(s); results come back through that struct, not eax. */
+ prep_syscalls(current, (struct syscall*)tf->tf_regs.reg_eax,
+ tf->tf_regs.reg_edx);
break;
default:
// Unexpected trap: The user process or the kernel has a bug.
warn("Unexpected trap from userspace");
proc_incref(current, 1);
proc_destroy(current);
+ assert(0); /* proc_destroy() of current should not return */
return;
}
}
// Here's where you'll restore FP/MMX/XMM regs
}
-void
-trap(trapframe_t *tf)
+/* Helper. For now, this copies out the TF to pcpui, and sets the tf to use it.
+ * Eventually, we ought to do this in trapentry.S
+ *
+ * On return, *tf points at the per-cpu copy (pcpui->actual_tf), so the caller
+ * stops using the stack-resident frame it was originally handed. */
+static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
{
- //printk("Incoming TRAP frame on core %d at %p\n", core_id(), tf);
+ pcpui->actual_tf = **tf;
+ pcpui->cur_tf = &pcpui->actual_tf;
+ *tf = &pcpui->actual_tf;
+}
- /* Note we are not preemptively saving the TF in the env_tf. We do maintain
- * a reference to it in current_tf (a per-cpu pointer).
- * In general, only save the tf and any silly state once you know it
- * is necessary (blocking). And only save it in env_tf when you know you
- * are single core (PROC_RUNNING_S) */
+void trap(struct trapframe *tf)
+{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ /* Copy out the TF for now, set tf to point to it. */
if (!in_kernel(tf))
- set_current_tf(tf);
+ set_current_tf(pcpui, &tf);
+ printd("Incoming TRAP %d on core %d, TF at %p\n", tf->tf_trapno, core_id(),
+ tf);
+ /* low two bits of CS are the CPL; mask them off to check the segment */
if ((tf->tf_cs & ~3) != GD_UT && (tf->tf_cs & ~3) != GD_KT) {
print_trapframe(tf);
panic("Trapframe with invalid CS!");
}
-
- // Dispatch based on what type of trap occurred
trap_dispatch(tf);
-
- // Return to the current process, which should be runnable.
- proc_restartcore(current, tf); // Note the comment in syscall.c
+ /* Return to the current process, which should be runnable. If we're the
+ * kernel, we should just return naturally. Note that current and tf need
+ * to still be okay (might not be after blocking) */
+ if (in_kernel(tf))
+ return; /* TODO: think about this, might want a helper instead. */
+ proc_restartcore();
+ assert(0); /* proc_restartcore() should not return */
}
-void
-irq_handler(trapframe_t *tf)
+/* Generic IRQ entry: copies out a userspace TF to per-cpu storage, EOIs the
+ * interrupt, and restarts the core unless the IRQ interrupted the kernel. */
+void irq_handler(struct trapframe *tf)
{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ /* Copy out the TF for now, set tf to point to it. */
if (!in_kernel(tf))
- set_current_tf(tf);
+ set_current_tf(pcpui, &tf);
+
//if (core_id())
- // cprintf("Incoming IRQ, ISR: %d on core %d\n", tf->tf_trapno, core_id());
- // merge this with alltraps? other than the EOI... or do the same in all traps
+ printd("Incoming IRQ, ISR: %d on core %d\n", tf->tf_trapno, core_id());
+ /* NOTE(review): the commented-out core_id() guard above no longer applies;
+ * the printd is now unconditional */
extern handler_wrapper_t (RO handler_wrappers)[NUM_HANDLER_WRAPPERS];
// All others are LAPIC (timer, IPIs, perf, non-ExtINT LINTS, etc)
// For now, only 235-255 are available
assert(tf->tf_trapno >= 32); // slows us down, but we should never have this
-
+
+#ifdef __CONFIG_ENABLE_MPTABLES__
+ /* TODO: this should be for any IOAPIC EOI, not just MPTABLES */
lapic_send_eoi();
-
- /*
+#else
//Old PIC relatd code. Should be gone for good, but leaving it just incase.
if (tf->tf_trapno < 48)
pic_send_eoi(tf->tf_trapno - PIC1_OFFSET);
else
lapic_send_eoi();
- */
-
+#endif
+ /* Return to the current process, which should be runnable. If we're the
+ * kernel, we should just return naturally. Note that current and tf need
+ * to still be okay (might not be after blocking) */
+ if (in_kernel(tf))
+ return; /* TODO: think about this, might want a helper instead. */
+ proc_restartcore();
+ assert(0); /* proc_restartcore() should not return */
}
void
table[int_num].data = data;
}
-void
-page_fault_handler(trapframe_t *tf)
+void page_fault_handler(struct trapframe *tf)
{
- uint32_t fault_va;
-
- // Read processor's CR2 register to find the faulting address
- fault_va = rcr2();
-
- // Handle kernel-mode page faults.
+ uint32_t fault_va = rcr2();
+ /* the fault was a write iff PF_ERROR_WRITE is set in the error code */
+ int prot = tf->tf_err & PF_ERROR_WRITE ? PROT_WRITE : PROT_READ;
+ int err;
- // TODO - one day, we'll want to handle this.
+ /* TODO - handle kernel page faults */
+ /* CPL 0 (low bits of CS) means the fault happened in the kernel */
if ((tf->tf_cs & 3) == 0) {
print_trapframe(tf);
panic("Page Fault in the Kernel at 0x%08x!", fault_va);
}
-
- // We've already handled kernel-mode exceptions, so if we get here,
- // the page fault happened in user mode.
-
- // Call the environment's page fault upcall, if one exists. Set up a
- // page fault stack frame on the user exception stack (below
- // UXSTACKTOP), then branch to current->env_pgfault_upcall.
- //
- // The page fault upcall might cause another page fault, in which case
- // we branch to the page fault upcall recursively, pushing another
- // page fault stack frame on top of the user exception stack.
- //
- // The trap handler needs one word of scratch space at the top of the
- // trap-time stack in order to return. In the non-recursive case, we
- // don't have to worry about this because the top of the regular user
- // stack is free. In the recursive case, this means we have to leave
- // an extra word between the current top of the exception stack and
- // the new stack frame because the exception stack _is_ the trap-time
- // stack.
- //
- // If there's no page fault upcall, the environment didn't allocate a
- // page for its exception stack, or the exception stack overflows,
- // then destroy the environment that caused the fault.
- //
- // Hints:
- // user_mem_assert() and env_run() are useful here.
- // To change what the user environment runs, modify 'current->env_tf'
- // (the 'tf' variable points at 'current->env_tf').
-
- // LAB 4: Your code here.
-
- // TODO: compute correct access type
- if(handle_page_fault(current,fault_va,PROT_READ))
- {
- // Destroy the environment that caused the fault.
- cprintf("[%08x] user fault va %08x ip %08x from core %d\n",
- current->pid, fault_va, tf->tf_eip, core_id());
+ if ((err = handle_page_fault(current, fault_va, prot))) {
+ /* Destroy the faulting process */
+ printk("[%08x] user %s fault va %08x ip %08x on core %d with err %d\n",
+ current->pid, prot & PROT_READ ? "READ" : "WRITE", fault_va,
+ tf->tf_eip, core_id(), err);
print_trapframe(tf);
proc_incref(current, 1);
proc_destroy(current);
+ assert(0); /* proc_destroy() of current should not return */
}
}
/* This is called from sysenter's asm, with the tf on the kernel stack. */
void sysenter_callwrapper(struct trapframe *tf)
{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ /* Copy out the TF for now, set tf to point to it. */
if (!in_kernel(tf))
- set_current_tf(tf);
-
- // syscall code wants an edible reference for current
- proc_incref(current, 1);
- tf->tf_regs.reg_eax = (intreg_t) syscall(current,
- tf->tf_regs.reg_eax,
- tf->tf_regs.reg_esi,
- tf->tf_regs.reg_ecx,
- tf->tf_regs.reg_ebx,
- tf->tf_regs.reg_edi,
- 0);
- proc_decref(current, 1);
- /*
- * careful here - we need to make sure that this current is the right
- * process, which could be weird if the syscall blocked. it would need to
- * restore the proper value in current before returning to here.
- * likewise, tf could be pointing to random gibberish.
- */
- proc_restartcore(current, tf);
+ set_current_tf(pcpui, &tf);
+
+ if (in_kernel(tf))
+ panic("sysenter from a kernel TF!!");
+ /* Set up and run the async calls.  NOTE(review): reg_eax appears to carry
+ * a user pointer to the syscall array and reg_esi a count -- confirm
+ * against prep_syscalls()'s contract. */
+ prep_syscalls(current, (struct syscall*)tf->tf_regs.reg_eax,
+ tf->tf_regs.reg_esi);
+ proc_restartcore();
}
struct kmem_cache *kernel_msg_cache;
default:
panic("Unknown type of kernel message!");
}
- // since we touched memory the other core will touch (the lock), we don't
- // need an wmb_f()
- send_ipi(get_hw_coreid(dst), I_KERNEL_MSG);
+ /* since we touched memory the other core will touch (the lock), we don't
+ * need a wmb_f() */
+ /* if we're sending a routine message locally, we don't want/need an IPI */
+ if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
+ send_ipi(get_hw_coreid(dst), I_KERNEL_MSG);
return 0;
}
per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
kernel_message_t msg_cp, *k_msg;
+ /* Copy out the TF for now, set tf to point to it. */
+ if (!in_kernel(tf))
+ set_current_tf(myinfo, &tf);
+
lapic_send_eoi();
while (1) { // will break out when there are no more messages
/* Try to get an immediate message. Exec and free it. */
send_self_ipi(I_KERNEL_MSG);
/* Execute the kernel message */
assert(msg_cp.pc);
+ /* TODO: when batching syscalls, this should be reread from cur_tf */
msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
}
}
* make sure immediates still run first (or when they arrive, if processing a
* bunch of these messages). This will disable interrupts, and restore them to
* whatever state you left them. */
-void process_routine_kmsg(void)
+void process_routine_kmsg(struct trapframe *tf)
{
per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
kernel_message_t msg_cp, *k_msg;
int8_t irq_state = 0;
disable_irqsave(&irq_state);
+ /* If we were told what our TF was, use that.  Otherwise, fall back to
+ * current_tf (the per-cpu pointer). */
+ tf = tf ? tf : current_tf;
while (1) {
/* normally, we want ints disabled, so we don't have an empty self-ipi
* for every routine message. (imagine a long list of routines). But we
send_self_ipi(I_KERNEL_MSG);
/* Execute the kernel message */
assert(msg_cp.pc);
- msg_cp.pc(0, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
+ msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
}
}