Miscellaneous RISC-V compiler updates
diff --git a/kern/arch/riscv/trap.c b/kern/arch/riscv/trap.c
index 9110e42..446d9b9 100644
--- a/kern/arch/riscv/trap.c
+++ b/kern/arch/riscv/trap.c
@@ -10,7 +10,6 @@
 #include <smp.h>
 #include <slab.h>
 #include <mm.h>
-#include <ros/mman.h>
 #include <umem.h>
 #include <pmap.h>
 
@@ -84,9 +83,8 @@ void set_stack_top(uintptr_t stacktop)
 /* Note the assertion assumes we are in the top page of the stack. */
 uintptr_t get_stack_top(void)
 {
-       uintptr_t sp, stacktop;
-       stacktop = core_stacktops[core_id()];
-       asm volatile("move %0,$sp" : "=r"(sp));
+       register uintptr_t sp asm ("sp");
+       uintptr_t stacktop = core_stacktops[core_id()];
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
 }
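The inline asm removed above was MIPS syntax ("move %0,$sp") left over from the port; the replacement uses GCC's explicit register variable extension, which binds a local to a named machine register so that an ordinary read of the variable reads the register. A minimal standalone sketch of the pattern (illustrative only, assuming a RISC-V target and GCC or Clang; not code from this tree):

#include <stdint.h>
#include <stdio.h>

/* Bind a local to the stack pointer register ("sp" is x2 on RISC-V).
 * Reading the variable reads the live register, with no mov or asm
 * needed; get_stack_top() above relies on exactly this behavior. */
static inline uintptr_t current_sp(void)
{
	register uintptr_t sp asm ("sp");
	return sp;
}

int main(void)
{
	printf("current stack pointer: %p\n", (void *)current_sp());
	return 0;
}

A bogus register name fails at compile time, which is an improvement over silently assembling the wrong mnemonic for the target.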
@@ -101,14 +99,16 @@ sysenter_init(void)
 {
 }
 
-/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use it.
- * Eventually, we ought to do this in trap_entry.S.  Honestly, do whatever you
- * want with this.  The **tf is for convenience in x86. */
-static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
+/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
+ * to it. */
+static void
+set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
 {
-       pcpui->actual_tf = **tf;
+       if (irq_is_enabled())
+               warn("Turn off IRQs until cur_tf is set!");
+       assert(!pcpui->cur_tf);
+       pcpui->actual_tf = *tf;
        pcpui->cur_tf = &pcpui->actual_tf;
-       *tf = &pcpui->actual_tf;
 }
 
 static int
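set_current_tf() now takes the trapframe pointer directly rather than a struct trapframe **, warns if IRQs are still enabled, and asserts that cur_tf has not already been set. The call pattern the later hunks adopt looks roughly like the following sketch, which reuses this file's names; example_dispatch() itself is made up for illustration:

static void example_dispatch(trapframe_t *tf)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	/* Only user trapframes are stashed in pcpui, and only while IRQs
	 * are still disabled from the trap entry path. */
	if (!in_kernel(tf))
		set_current_tf(pcpui, tf);

	/* ... handle the trap; call enable_irq() only after cur_tf is set ... */
}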
@@ -142,6 +142,7 @@ format_trapframe(trapframe_t *tf, char* buf, int bufsz)
                        "sr %016lx pc %016lx va %016lx insn       %008x\n", tf->sr, tf->epc,
                        tf->badvaddr, (uint32_t)tf->insn);
 
+       buf[bufsz-1] = 0;
        return len;
 }
 
@@ -173,12 +174,6 @@ handle_ipi(trapframe_t* tf)
 {
        clear_ipi();
 
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
-       if (!in_kernel(tf))
-               set_current_tf(pcpui, &tf);
-       else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
-               advance_pc(tf);
-
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
 
@@ -212,7 +207,8 @@ handle_ipi(trapframe_t* tf)
 
 /* Same as in x86.  Might be diff in the future if there is no way to check for
  * immediate messages or there is the ability to selectively mask IPI vectors.*/
-void process_routine_kmsg(struct trapframe *tf)
+void
+process_routine_kmsg(struct trapframe *tf)
 {
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
@@ -265,17 +261,14 @@ unhandled_trap(trapframe_t* state, const char* name)
        else
        {
                char tf_buf[1024];
-               int tf_len = format_trapframe(state, tf_buf, sizeof(tf_buf));
+               format_trapframe(state, tf_buf, sizeof(tf_buf));
 
                warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);
 
                assert(current);
-               proc_incref(current, 1);
                proc_destroy(current);
-
-               panic("I shouldn't have gotten here!");
        }
 }
 
@@ -286,8 +279,13 @@ handle_timer_interrupt(trapframe_t* state)
 }
 
 static void
-handle_interrupt(trapframe_t* state)
+handle_interrupt(trapframe_t* tf)
 {
+       if (!in_kernel(tf))
+               set_current_tf(&per_cpu_info[core_id()], tf);
+       else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
+               advance_pc(tf);
+       
        typedef void (*trap_handler)(trapframe_t*);
        
        const static trap_handler trap_handlers[NIRQ] = {
@@ -295,20 +293,16 @@ handle_interrupt(trapframe_t* state)
          [IPI_IRQ] = handle_ipi,
        };
 
-       uintptr_t interrupts = (state->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;
+       uintptr_t interrupts = (tf->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;
 
        for(uintptr_t i = 0; interrupts; interrupts >>= 1, i++)
        {
                if(interrupts & 1)
                {
                        if(trap_handlers[i])
-                               trap_handlers[i](state);
+                               trap_handlers[i](tf);
                        else
-                       {
-                               char name[32];
-                               snprintf(name, sizeof(name), "Bad Interrupt %d", i);
-                               unhandled_trap(state, name);
-                       }
+                               panic("Bad interrupt %d", i);
                }
        }
 }
@@ -337,11 +331,11 @@ handle_fault_fetch(trapframe_t* state)
        if(in_kernel(state))
        {
                print_trapframe(state);
-               panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
+               panic("Instruction Page Fault in the Kernel at %p!", state->badvaddr);
        }
        
-       if(handle_page_fault(current, state->badvaddr, PROT_READ))
-               unhandled_trap(state, "Load Page Fault");
+       if(handle_page_fault(current, state->badvaddr, PROT_EXEC))
+               unhandled_trap(state, "Instruction Page Fault");
 }
 
 static void
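With the PROT_READ to PROT_EXEC change, handle_fault_fetch() now asks the VM code for execute permission, matching the fault it actually handles, and the kernel panic message names the right fault type. The sibling data-fault handlers presumably differ only in the fault name and the access type they request; a hedged sketch of a load-fault handler of the same shape (the name and the kernel-side behavior are assumptions, since this diff does not show that function):

static void handle_fault_load(trapframe_t *state)
{
	if (in_kernel(state)) {
		print_trapframe(state);
		panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
	}

	/* PROT_READ for loads, PROT_WRITE for stores, PROT_EXEC for fetches. */
	if (handle_page_fault(current, state->badvaddr, PROT_READ))
		unhandled_trap(state, "Load Page Fault");
}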
@@ -377,30 +371,25 @@ handle_illegal_instruction(trapframe_t* state)
 }
 
 static void
-handle_fp_disabled(trapframe_t* state)
+handle_fp_disabled(trapframe_t* tf)
 {
-       if(in_kernel(state))
+       if(in_kernel(tf))
                panic("kernel executed an FP instruction!");
 
-       state->sr |= SR_EF;
+       tf->sr |= SR_EF;
+       env_pop_tf(tf); /* We didn't save our TF, so don't use proc_restartcore */
 }
 
 static void
 handle_syscall(trapframe_t* state)
 {
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        uintptr_t a0 = state->gpr[4];
        uintptr_t a1 = state->gpr[5];
 
        advance_pc(state);
+       set_current_tf(&per_cpu_info[core_id()], state);
        enable_irq();
-       struct per_cpu_info* coreinfo = &per_cpu_info[core_id()];
-
-       set_current_tf(pcpui, &state);
-
        prep_syscalls(current, (struct syscall*)a0, a1);
-
-       proc_restartcore();
 }
 
 static void
@@ -413,9 +402,7 @@ handle_breakpoint(trapframe_t* state)
 void
 handle_trap(trapframe_t* tf)
 {
-       typedef void (*trap_handler)(trapframe_t*);
-       
-       const static trap_handler trap_handlers[NUM_CAUSES] = {
+       static void (*const trap_handlers[NUM_CAUSES])(trapframe_t*) = {
          [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
          [CAUSE_FAULT_FETCH] = handle_fault_fetch,
          [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
@@ -432,10 +419,15 @@ handle_trap(trapframe_t* tf)
        
        int exccode = (tf->cause & CAUSE_EXCCODE) >> CAUSE_EXCCODE_SHIFT;
        assert(exccode < NUM_CAUSES && trap_handlers[exccode]);
-       
        trap_handlers[exccode](tf);
        
-       env_pop_tf(tf);
+       /* Return to the current process, which should be runnable.  If we're the
+        * kernel, we should just return naturally.  Note that current and tf need
+        * to still be okay (might not be after blocking) */
+       if (in_kernel(tf))
+               env_pop_tf(tf);
+       else
+               proc_restartcore();
 }
 
 /* We don't have NMIs now. */
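Taken together, these hunks funnel the "how do we leave the trap handler" decision into the tail of handle_trap(). A sketch of the resulting exit discipline, assuming proc_restartcore() behaves as it does on x86 Akaros and ultimately pops pcpui->cur_tf for the current process:

/* Illustrative restatement; the real logic is the tail of handle_trap(). */
static void trap_exit_sketch(trapframe_t *tf)
{
	if (in_kernel(tf)) {
		/* Kernel trap: the TF lives on this kernel stack and was never
		 * copied into pcpui, so pop it in place. */
		env_pop_tf(tf);
	} else {
		/* User trap: restart via the common path, which resumes the
		 * current process's saved context. */
		proc_restartcore();
	}
}

handle_fp_disabled() is the one exception: it pops its trapframe directly because, as its new comment notes, it never saved the TF into pcpui.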