Pmap ops: perm->settings
[akaros.git] / kern / arch / riscv / trap.c
index 84584ea..1d9055d 100644 (file)
@@ -1,6 +1,8 @@
 #include <arch/arch.h>
 #include <assert.h>
-#include <arch/trap.h>
+#include <trap.h>
+#include <arch/console.h>
+#include <console.h>
 #include <string.h>
 #include <process.h>
 #include <syscall.h>
@@ -10,7 +12,6 @@
 #include <smp.h>
 #include <slab.h>
 #include <mm.h>
-#include <ros/mman.h>
 #include <umem.h>
 #include <pmap.h>
 
  * per_cpu_info. */
 uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
 
-struct kmem_cache *kernel_msg_cache;
-void kernel_msg_init(void)
-{
-       kernel_msg_cache = kmem_cache_create("kernel_msgs",
-                          sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
-}
-
-spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
-kernel_message_t kernel_message_buf[MAX_NUM_CPUS];
-
-/* This is mostly identical to x86's, minus the different send_ipi call. */
-uint32_t send_kernel_message(uint32_t dst, amr_t pc,
-                             TV(a0t) arg0, TV(a1t) arg1, TV(a2t) arg2, int type)
-{
-       kernel_message_t *k_msg;
-       assert(pc);
-       // note this will be freed on the destination core
-       k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
-       k_msg->srcid = core_id();
-       k_msg->pc = pc;
-       k_msg->arg0 = arg0;
-       k_msg->arg1 = arg1;
-       k_msg->arg2 = arg2;
-       switch (type) {
-               case KMSG_IMMEDIATE:
-                       spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
-                       STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
-                       spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
-                       break;
-               case KMSG_ROUTINE:
-                       spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
-                       STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
-                       spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
-                       break;
-               default:
-                       panic("Unknown type of kernel message!");
-       }
-       /* if we're sending a routine message locally, we don't want/need an IPI */
-       if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
-               send_ipi(dst);
-       return 0;
-}
-
 void
-advance_pc(trapframe_t* state)
+advance_pc(struct hw_trapframe *state)
 {
        state->epc += 4;
 }
@@ -84,9 +42,8 @@ void set_stack_top(uintptr_t stacktop)
 /* Note the assertion assumes we are in the top page of the stack. */
 uintptr_t get_stack_top(void)
 {
-       uintptr_t sp, stacktop;
-       stacktop = core_stacktops[core_id()];
-       asm volatile("move %0,$sp" : "=r"(sp));
+       register uintptr_t sp asm ("sp");
+       uintptr_t stacktop = core_stacktops[core_id()];
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
 }
@@ -96,161 +53,116 @@ idt_init(void)
 {
 }
 
-void
-sysenter_init(void)
+/* Helper.  For now, this copies out the TF to pcpui, and sets cur_ctx to point
+ * to it. */
+static void set_current_ctx_hw(struct per_cpu_info *pcpui,
+                               struct hw_trapframe *hw_tf)
 {
+       if (irq_is_enabled())
+               warn("Turn off IRQs until cur_ctx is set!");
+       assert(!pcpui->cur_ctx);
+       pcpui->actual_ctx.type = ROS_HW_CTX;
+       pcpui->actual_ctx.tf.hw_tf = *hw_tf;
+       pcpui->cur_ctx = &pcpui->actual_ctx;
 }
 
-/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use it.
- * Eventually, we ought to do this in trap_entry.S.  Honestly, do whatever you
- * want with this.  The **tf is for convenience in x86. */
-static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
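+/* As above, but saves a software context (sw_trapframe) instead. */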
+static void set_current_ctx_sw(struct per_cpu_info *pcpui,
+                               struct sw_trapframe *sw_tf)
 {
-       pcpui->actual_tf = **tf;
-       pcpui->cur_tf = &pcpui->actual_tf;
-       *tf = &pcpui->actual_tf;
+       if (irq_is_enabled())
+               warn("Turn off IRQs until cur_ctx is set!");
+       assert(!pcpui->cur_ctx);
+       pcpui->actual_ctx.type = ROS_SW_CTX;
+       pcpui->actual_ctx.tf.sw_tf = *sw_tf;
+       pcpui->cur_ctx = &pcpui->actual_ctx;
 }
 
 static int
-format_trapframe(trapframe_t *tf, char* buf, int bufsz)
+format_trapframe(struct hw_trapframe *hw_tf, char* buf, int bufsz)
 {
        // slightly hackish way to read out the instruction that faulted.
        // not guaranteed to be right 100% of the time
        uint32_t insn;
-       if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
+       if(!(current && !memcpy_from_user(current,&insn,(void*)hw_tf->epc,4)))
                insn = -1;
 
        int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
-                          tf, core_id());
+                          hw_tf, core_id());
        static const char* regnames[] = {
-         "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
-         "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
-         "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
-         "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
+         "z ", "ra", "s0", "s1", "s2", "s3", "s4", "s5",
+         "s6", "s7", "s8", "s9", "sA", "sB", "sp", "tp",
+         "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5",
+         "a6", "a7", "a8", "a9", "aA", "aB", "aC", "aD"
        };
        
-       tf->gpr[0] = 0;
+       hw_tf->gpr[0] = 0;
        
        for(int i = 0; i < 32; i+=4)
        {
                for(int j = 0; j < 4; j++)
                        len += snprintf(buf+len, bufsz-len,
-                                       "%s %016lx%c", regnames[i+j], tf->gpr[i+j], 
+                                       "%s %016lx%c", regnames[i+j], hw_tf->gpr[i+j], 
                                        j < 3 ? ' ' : '\n');
        }
        len += snprintf(buf+len, bufsz-len,
-                       "sr %016lx pc %016lx va %016lx insn       %008x\n", tf->sr, tf->epc,
-                       tf->badvaddr, (uint32_t)tf->insn);
+                       "sr %016lx pc %016lx va %016lx insn       %08x\n",
+                                       hw_tf->sr, hw_tf->epc, hw_tf->badvaddr, insn);
 
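+       /* Make sure the string is terminated even if the TF didn't fit. */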
+       buf[bufsz-1] = 0;
        return len;
 }
 
 void
-print_trapframe(trapframe_t* tf)
+print_trapframe(struct hw_trapframe *hw_tf)
 {
        char buf[1024];
-       int len = format_trapframe(tf,buf,sizeof(buf));
+       int len = format_trapframe(hw_tf, buf, sizeof(buf));
        cputbuf(buf,len);
 }
 
-/* Helper function.  Returns 0 if the list was empty. */
-static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
-                                       spinlock_t *list_lock)
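+/* If we interrupted cpu_halt()'s halt loop, return to cpu_halt()'s caller
+ * instead of resuming the loop. */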
+static void exit_halt_loop(struct hw_trapframe *hw_tf)
 {
-       kernel_message_t *k_msg;
-       spin_lock_irqsave(list_lock);
-       k_msg = STAILQ_FIRST(list_head);
-       if (k_msg)
-               STAILQ_REMOVE_HEAD(list_head, link);
-       spin_unlock_irqsave(list_lock);
-       return k_msg;
+       extern char after_cpu_halt;
+       if ((char*)hw_tf->epc >= (char*)&cpu_halt &&
+           (char*)hw_tf->epc < &after_cpu_halt)
+               hw_tf->epc = hw_tf->gpr[GPR_RA];
 }
 
-/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
- * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
-static void
-handle_ipi(trapframe_t* tf)
+static void handle_keypress(char c)
 {
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
-       if (!in_kernel(tf))
-               set_current_tf(pcpui, &tf);
-       else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
-               advance_pc(tf);
-
-       per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
-       kernel_message_t msg_cp, *k_msg;
-
-       while (1) { // will break out when there are no more messages
-               /* Try to get an immediate message.  Exec and free it. */
-               k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
-               if (k_msg) {
-                       assert(k_msg->pc);
-                       k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
-                       kmem_cache_free(kernel_msg_cache, (void*)k_msg);
-               } else { // no immediate, might be a routine
-                       if (in_kernel(tf))
-                               return; // don't execute routine msgs if we were in the kernel
-                       k_msg = get_next_amsg(&myinfo->routine_amsgs,
-                                             &myinfo->routine_amsg_lock);
-                       if (!k_msg) // no routines either
-                               return;
-                       /* copy in, and then free, in case we don't return */
-                       msg_cp = *k_msg;
-                       kmem_cache_free(kernel_msg_cache, (void*)k_msg);
-                       /* make sure an IPI is pending if we have more work */
-                       /* technically, we don't need to lock when checking */
-                       if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
-                               send_ipi(core_id());
-                       /* Execute the kernel message */
-                       assert(msg_cp.pc);
-                       msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
-               }
-       }
+       /* brho: not sure if this will work on riscv or not... */
+       #define capchar2ctl(x) ((x) - '@')
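+       /* ^G (capchar2ctl('G') == 0x07) drops into the monitor; everything
+        * else is fed to the console buffer. */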
+       amr_t handler = c == capchar2ctl('G') ? __run_mon : __cons_add_char;
+       send_kernel_message(core_id(), handler, (long)&cons_buf, (long)c, 0,
+                           KMSG_ROUTINE);
+       cons_init();
 }
 
-/* Same as in x86.  Might be diff in the future if there is no way to check for
- * immediate messages or there is the ability to selectively mask IPI vectors.*/
-void process_routine_kmsg(struct trapframe *tf)
+static void handle_host_interrupt(struct hw_trapframe *hw_tf)
 {
-       per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
-       kernel_message_t msg_cp, *k_msg;
-       int8_t irq_state = 0;
-
-       disable_irqsave(&irq_state);
-       /* If we were told what our TF was, use that.  o/w, go with current_tf. */
-       tf = tf ? tf : current_tf;
-       while (1) {
-               /* normally, we want ints disabled, so we don't have an empty self-ipi
-                * for every routine message. (imagine a long list of routines).  But we
-                * do want immediates to run ahead of routines.  This enabling should
-                * work (might not in some shitty VMs).  Also note we can receive an
-                * extra self-ipi for routine messages before we turn off irqs again.
-                * Not a big deal, since we will process it right away. */
-               if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
-                       enable_irq();
-                       cpu_relax();
-                       disable_irq();
-               }
-               k_msg = get_next_amsg(&myinfo->routine_amsgs,
-                                     &myinfo->routine_amsg_lock);
-               if (!k_msg) {
-                       enable_irqsave(&irq_state);
-                       return;
-               }
-               /* copy in, and then free, in case we don't return */
-               msg_cp = *k_msg;
-               kmem_cache_free(kernel_msg_cache, (void*)k_msg);
-               /* make sure an IPI is pending if we have more work */
-               if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
-                       send_ipi(core_id());
-               /* Execute the kernel message */
-               assert(msg_cp.pc);
-               msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
+       uintptr_t fh = mtpcr(PCR_FROMHOST, 0);
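+       /* Swap 0 into fromhost to claim the message; the top byte identifies
+        * the sender (0 = nothing pending, 1 = console input). */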
+       switch (fh >> 56)
+       {
+         case 0x00: return;
+         case 0x01: handle_keypress(fh); return;
+         default: assert(0);
        }
 }
 
+static void handle_timer_interrupt(struct hw_trapframe *hw_tf)
+{
+       timer_interrupt(hw_tf, NULL);
+}
+
+/* Assumes that any IPI you get is really a kernel message */
+static void handle_interprocessor_interrupt(struct hw_trapframe *hw_tf)
+{
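+       /* Clear the IPI before draining the kernel messages, so an IPI sent
+        * while we're in handle_kmsg_ipi() isn't silently squashed. */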
+       clear_ipi();
+       handle_kmsg_ipi(hw_tf, 0);
+}
+
 static void
-unhandled_trap(trapframe_t* state, const char* name)
+unhandled_trap(struct hw_trapframe *state, const char* name)
 {
        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);
@@ -263,163 +175,126 @@ unhandled_trap(trapframe_t* state, const char* name)
        else
        {
                char tf_buf[1024];
-               int tf_len = format_trapframe(state, tf_buf, sizeof(tf_buf));
+               format_trapframe(state, tf_buf, sizeof(tf_buf));
 
                warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);
 
                assert(current);
-               proc_incref(current, 1);
+               enable_irq();
                proc_destroy(current);
-
-               panic("I shouldn't have gotten here!");
        }
 }
 
 static void
-handle_timer_interrupt(trapframe_t* state)
-{
-       timer_interrupt(state, NULL);
-}
-
-static void
-handle_interrupt(trapframe_t* state)
-{
-       typedef void (*trap_handler)(trapframe_t*);
-       
-       const static trap_handler trap_handlers[NIRQ] = {
-         [TIMER_IRQ] = handle_timer_interrupt,
-         [IPI_IRQ] = handle_ipi,
-       };
-
-       int interrupts = (state->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;
-
-       for(int i = 0; interrupts; interrupts >>= 1, i++)
-       {
-               if(interrupts & 1)
-               {
-                       if(trap_handlers[i])
-                               trap_handlers[i](state);
-                       else
-                       {
-                               char name[32];
-                               snprintf(name, sizeof(name), "Bad Interrupt %d", i);
-                               unhandled_trap(state, name);
-                       }
-               }
-       }
-}
-
-static void
-handle_misaligned_fetch(trapframe_t* state)
+handle_misaligned_fetch(struct hw_trapframe *state)
 {
        unhandled_trap(state, "Misaligned Fetch");
 }
 
 static void
-handle_misaligned_load(trapframe_t* state)
+handle_misaligned_load(struct hw_trapframe *state)
 {
        unhandled_trap(state, "Misaligned Load");
 }
 
 static void
-handle_misaligned_store(trapframe_t* state)
+handle_misaligned_store(struct hw_trapframe *state)
 {
        unhandled_trap(state, "Misaligned Store");
 }
 
 static void
-handle_fault_fetch(trapframe_t* state)
+handle_fault_fetch(struct hw_trapframe *state)
 {
        if(in_kernel(state))
        {
                print_trapframe(state);
-               panic("Load Page Fault in the Kernel at 0x%08x!", state->badvaddr);
+               panic("Instruction Page Fault in the Kernel at %p!", state->epc);
        }
-       
-       if(handle_page_fault(current, state->badvaddr, PROT_READ))
-               unhandled_trap(state, "Load Page Fault");
+
+       set_current_ctx_hw(&per_cpu_info[core_id()], state);
+
+#warning "returns EAGAIN if you should reflect the fault"
+       if(handle_page_fault(current, state->epc, PROT_EXEC))
+               unhandled_trap(state, "Instruction Page Fault");
 }
 
 static void
-handle_fault_load(trapframe_t* state)
+handle_fault_load(struct hw_trapframe *state)
 {
        if(in_kernel(state))
        {
                print_trapframe(state);
-               panic("Load Page Fault in the Kernel at 0x%08x!", state->badvaddr);
+               panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
        }
-       
+
+       set_current_ctx_hw(&per_cpu_info[core_id()], state);
+
+#warning "returns EAGAIN if you should reflect the fault"
        if(handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Load Page Fault");
 }
 
 static void
-handle_fault_store(trapframe_t* state)
+handle_fault_store(struct hw_trapframe *state)
 {
        if(in_kernel(state))
        {
                print_trapframe(state);
-               panic("Store Page Fault in the Kernel at 0x%08x!", state->badvaddr);
+               panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
        }
-       
+
+       set_current_ctx_hw(&per_cpu_info[core_id()], state);
+
        if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
                unhandled_trap(state, "Store Page Fault");
 }
 
 static void
-handle_illegal_instruction(trapframe_t* state)
+handle_illegal_instruction(struct hw_trapframe *state)
 {
-       unhandled_trap(state, "Illegal Instruction");
-}
+       assert(!in_kernel(state));
 
-static void
-handle_fp_disabled(trapframe_t* state)
-{
-       if(in_kernel(state))
-               panic("kernel executed an FP instruction!");
+       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+       set_current_ctx_hw(pcpui, state);
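+       /* Try to emulate the (presumably FP) instruction; on success, skip
+        * past it. */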
+       if (emulate_fpu(state) == 0)
+       {
+               advance_pc(&pcpui->cur_ctx->tf.hw_tf);
+               return;
+       }
 
-       state->sr |= SR_EF;
+       unhandled_trap(state, "Illegal Instruction");
 }
 
 static void
-handle_syscall(trapframe_t* state)
+handle_syscall(struct hw_trapframe *state)
 {
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
-       uintptr_t a0 = state->gpr[4];
-       uintptr_t a1 = state->gpr[5];
+       uintptr_t a0 = state->gpr[GPR_A0];
+       uintptr_t a1 = state->gpr[GPR_A1];
 
        advance_pc(state);
+       set_current_ctx_hw(&per_cpu_info[core_id()], state);
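+       /* cur_ctx is set, so it's now safe to re-enable interrupts. */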
        enable_irq();
-       struct per_cpu_info* coreinfo = &per_cpu_info[core_id()];
-
-       set_current_tf(pcpui, &state);
-
        prep_syscalls(current, (struct syscall*)a0, a1);
-
-       proc_restartcore();
 }
 
 static void
-handle_breakpoint(trapframe_t* state)
+handle_breakpoint(struct hw_trapframe *state)
 {
        advance_pc(state);
        monitor(state);
 }
 
 void
-handle_trap(trapframe_t* tf)
+handle_trap(struct hw_trapframe *hw_tf)
 {
-       typedef void (*trap_handler)(trapframe_t*);
-       
-       const static trap_handler trap_handlers[NUM_CAUSES] = {
+       static void (*const trap_handlers[])(struct hw_trapframe *) = {
          [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
          [CAUSE_FAULT_FETCH] = handle_fault_fetch,
          [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
-         [CAUSE_FP_DISABLED] = handle_fp_disabled,
-         [CAUSE_INTERRUPT] = handle_interrupt,
          [CAUSE_SYSCALL] = handle_syscall,
          [CAUSE_BREAKPOINT] = handle_breakpoint,
          [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
@@ -427,11 +302,74 @@ handle_trap(trapframe_t* tf)
          [CAUSE_FAULT_LOAD] = handle_fault_load,
          [CAUSE_FAULT_STORE] = handle_fault_store,
        };
+
+       static void (*const irq_handlers[])(struct hw_trapframe *) = {
+         [IRQ_TIMER] = handle_timer_interrupt,
+         [IRQ_HOST] = handle_host_interrupt,
+         [IRQ_IPI] = handle_interprocessor_interrupt,
+       };
        
-       int exccode = (tf->cause & CAUSE_EXCCODE) >> CAUSE_EXCCODE_SHIFT;
-       assert(exccode < NUM_CAUSES && trap_handlers[exccode]);
-       
-       trap_handlers[exccode](tf);
+       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
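+       /* Interrupts set the sign bit of the cause register; the low bits
+        * give the IRQ number.  Positive causes are synchronous traps. */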
+       if (hw_tf->cause < 0)
+       {
+               uint8_t irq = hw_tf->cause;
+               assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
+                      irq_handlers[irq]);
+
+               if (in_kernel(hw_tf))
+                       exit_halt_loop(hw_tf);
+               else
+                       set_current_ctx_hw(&per_cpu_info[core_id()], hw_tf);
+
+               inc_irq_depth(pcpui);
+               irq_handlers[irq](hw_tf);
+               dec_irq_depth(pcpui);
+       }
+       else
+       {
+               assert(hw_tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
+                      trap_handlers[hw_tf->cause]);
+               if (in_kernel(hw_tf)) {
+                       inc_ktrap_depth(pcpui);
+                       trap_handlers[hw_tf->cause](hw_tf);
+                       dec_ktrap_depth(pcpui);
+               } else {
+                       trap_handlers[hw_tf->cause](hw_tf);
+               }
+               #warning "if a trap wasn't handled fully, like an MCP pf, reflect it"
+               /* TODO: reflect_unhandled_trap(hw_tf->tf_trapno, hw_tf->tf_err, aux); */
+       }
        
-       env_pop_tf(tf);
+       extern void pop_hw_tf(struct hw_trapframe *tf); /* in asm */
+       /* Return to the current process, which should be runnable.  If we're the
+        * kernel, we should just return naturally.  Note that current and tf need
+        * to still be okay (might not be after blocking) */
+       if (in_kernel(hw_tf))
+               pop_hw_tf(hw_tf);
+       else
+               proc_restartcore();
+}
+
+/* We don't have NMIs now. */
+void send_nmi(uint32_t os_coreid)
+{
+       printk("%s not implemented\n", __FUNCTION__);
+}
+
+int register_irq(int irq, isr_t handler, void *irq_arg, uint32_t tbdf)
+{
+       printk("%s not implemented\n", __FUNCTION__);
+       return -1;
+}
+
+int route_irqs(int cpu_vec, int coreid)
+{
+       printk("%s not implemented\n", __FUNCTION__);
+       return -1;
+}
+
+void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
+                              unsigned int err, unsigned long aux)
+{
+       printk("%s not implemented\n", __FUNCTION__);
 }