Changes to RISC-V supervisor mode
[akaros.git] / kern / arch / riscv / trap.c
index 5492ebf..a8b3f0a 100644 (file)
@@ -10,7 +10,6 @@
 #include <smp.h>
 #include <slab.h>
 #include <mm.h>
-#include <ros/mman.h>
 #include <umem.h>
 #include <pmap.h>
 
@@ -36,8 +35,8 @@ spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
 kernel_message_t kernel_message_buf[MAX_NUM_CPUS];
 
 /* This is mostly identical to x86's, minus the different send_ipi call. */
-uint32_t send_kernel_message(uint32_t dst, amr_t pc,
-                             TV(a0t) arg0, TV(a1t) arg1, TV(a2t) arg2, int type)
+uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
+                             long arg2, int type)
 {
        kernel_message_t *k_msg;
        assert(pc);
@@ -84,9 +83,8 @@ void set_stack_top(uintptr_t stacktop)
 /* Note the assertion assumes we are in the top page of the stack. */
 uintptr_t get_stack_top(void)
 {
-       uintptr_t sp, stacktop;
-       stacktop = core_stacktops[core_id()];
-       asm volatile("move %0,$sp" : "=r"(sp));
+       register uintptr_t sp asm ("sp");
+       uintptr_t stacktop = core_stacktops[core_id()];
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
 }
@@ -101,14 +99,16 @@ sysenter_init(void)
 {
 }
 
-/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use it.
- * Eventually, we ought to do this in trap_entry.S.  Honestly, do whatever you
- * want with this.  The **tf is for convenience in x86. */
-static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
+/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
+ * to it. */
+static void
+set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
 {
-       pcpui->actual_tf = **tf;
+       if (irq_is_enabled())
+               warn("Turn off IRQs until cur_tf is set!");
+       assert(!pcpui->cur_tf);
+       pcpui->actual_tf = *tf;
        pcpui->cur_tf = &pcpui->actual_tf;
-       *tf = &pcpui->actual_tf;
 }
 
 static int
@@ -142,6 +142,7 @@ format_trapframe(trapframe_t *tf, char* buf, int bufsz)
                        "sr %016lx pc %016lx va %016lx insn       %008x\n", tf->sr, tf->epc,
                        tf->badvaddr, (uint32_t)tf->insn);
 
+       buf[bufsz-1] = 0;
        return len;
 }
 
@@ -171,11 +172,12 @@ static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
 static void
 handle_ipi(trapframe_t* tf)
 {
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        if (!in_kernel(tf))
-               set_current_tf(pcpui, &tf);
+               set_current_tf(&per_cpu_info[core_id()], tf);
        else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
                advance_pc(tf);
+       
+       clear_ipi();
 
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
@@ -210,7 +212,8 @@ handle_ipi(trapframe_t* tf)
 
 /* Same as in x86.  Might be diff in the future if there is no way to check for
  * immediate messages or there is the ability to selectively mask IPI vectors.*/
-void process_routine_kmsg(struct trapframe *tf)
+void
+process_routine_kmsg(struct trapframe *tf)
 {
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
@@ -263,52 +266,26 @@ unhandled_trap(trapframe_t* state, const char* name)
        else
        {
                char tf_buf[1024];
-               int tf_len = format_trapframe(state, tf_buf, sizeof(tf_buf));
+               format_trapframe(state, tf_buf, sizeof(tf_buf));
 
                warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);
 
                assert(current);
-               proc_incref(current, 1);
                proc_destroy(current);
-
-               panic("I shouldn't have gotten here!");
        }
 }
 
 static void
-handle_timer_interrupt(trapframe_t* state)
-{
-       timer_interrupt(state, NULL);
-}
-
-static void
-handle_interrupt(trapframe_t* state)
+handle_timer_interrupt(trapframe_t* tf)
 {
-       typedef void (*trap_handler)(trapframe_t*);
+       if (!in_kernel(tf))
+               set_current_tf(&per_cpu_info[core_id()], tf);
+       else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
+               advance_pc(tf);
        
-       const static trap_handler trap_handlers[NIRQ] = {
-         [TIMER_IRQ] = handle_timer_interrupt,
-         [IPI_IRQ] = handle_ipi,
-       };
-
-       int interrupts = (state->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;
-
-       for(int i = 0; interrupts; interrupts >>= 1, i++)
-       {
-               if(interrupts & 1)
-               {
-                       if(trap_handlers[i])
-                               trap_handlers[i](state);
-                       else
-                       {
-                               char name[32];
-                               snprintf(name, sizeof(name), "Bad Interrupt %d", i);
-                               unhandled_trap(state, name);
-                       }
-               }
-       }
+       timer_interrupt(tf, NULL);
 }
 
 static void
@@ -335,11 +312,11 @@ handle_fault_fetch(trapframe_t* state)
        if(in_kernel(state))
        {
                print_trapframe(state);
-               panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
+               panic("Instruction Page Fault in the Kernel at %p!", state->badvaddr);
        }
        
-       if(handle_page_fault(current, state->badvaddr, PROT_READ))
-               unhandled_trap(state, "Load Page Fault");
+       if(handle_page_fault(current, state->badvaddr, PROT_EXEC))
+               unhandled_trap(state, "Instruction Page Fault");
 }
 
 static void
@@ -375,30 +352,25 @@ handle_illegal_instruction(trapframe_t* state)
 }
 
 static void
-handle_fp_disabled(trapframe_t* state)
+handle_fp_disabled(trapframe_t* tf)
 {
-       if(in_kernel(state))
+       if(in_kernel(tf))
                panic("kernel executed an FP instruction!");
 
-       state->sr |= SR_EF;
+       tf->sr |= SR_EF;
+       env_pop_tf(tf); /* We didn't save our TF, so don't use proc_restartcore */
 }
 
 static void
 handle_syscall(trapframe_t* state)
 {
-       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        uintptr_t a0 = state->gpr[4];
        uintptr_t a1 = state->gpr[5];
 
        advance_pc(state);
+       set_current_tf(&per_cpu_info[core_id()], state);
        enable_irq();
-       struct per_cpu_info* coreinfo = &per_cpu_info[core_id()];
-
-       set_current_tf(pcpui, &state);
-
        prep_syscalls(current, (struct syscall*)a0, a1);
-
-       proc_restartcore();
 }
 
 static void
@@ -411,29 +383,32 @@ handle_breakpoint(trapframe_t* state)
 void
 handle_trap(trapframe_t* tf)
 {
-       typedef void (*trap_handler)(trapframe_t*);
-       
-       const static trap_handler trap_handlers[NUM_CAUSES] = {
+       static void (*const trap_handlers[NUM_CAUSES])(trapframe_t*) = {
          [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
          [CAUSE_FAULT_FETCH] = handle_fault_fetch,
          [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_FP_DISABLED] = handle_fp_disabled,
-         [CAUSE_INTERRUPT] = handle_interrupt,
          [CAUSE_SYSCALL] = handle_syscall,
          [CAUSE_BREAKPOINT] = handle_breakpoint,
          [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
          [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
          [CAUSE_FAULT_LOAD] = handle_fault_load,
          [CAUSE_FAULT_STORE] = handle_fault_store,
+         [CAUSE_IRQ0 + IPI_IRQ] = handle_ipi,
+         [CAUSE_IRQ0 + TIMER_IRQ] = handle_timer_interrupt,
        };
        
-       int exccode = (tf->cause & CAUSE_EXCCODE) >> CAUSE_EXCCODE_SHIFT;
-       assert(exccode < NUM_CAUSES && trap_handlers[exccode]);
-       
-       trap_handlers[exccode](tf);
+       assert(tf->cause < NUM_CAUSES && trap_handlers[tf->cause]);
+       trap_handlers[tf->cause](tf);
        
-       env_pop_tf(tf);
+       /* Return to the current process, which should be runnable.  If we're the
+        * kernel, we should just return naturally.  Note that current and tf need
+        * to still be okay (might not be after blocking) */
+       if (in_kernel(tf))
+               env_pop_tf(tf);
+       else
+               proc_restartcore();
 }
 
 /* We don't have NMIs now. */