use new risc-v i/o abi
[akaros.git] / kern / arch / riscv / trap.c
1 #include <arch/arch.h>
2 #include <assert.h>
3 #include <arch/trap.h>
4 #include <arch/console.h>
5 #include <string.h>
6 #include <process.h>
7 #include <syscall.h>
8 #include <monitor.h>
9 #include <manager.h>
10 #include <stdio.h>
11 #include <smp.h>
12 #include <slab.h>
13 #include <mm.h>
14 #include <umem.h>
15 #include <pmap.h>
16
/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe).  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
27
28 void
29 advance_pc(trapframe_t* state)
30 {
31         state->epc += 4;
32 }
33
34 /* Set stacktop for the current core to be the stack the kernel will start on
35  * when trapping/interrupting from userspace */
36 void set_stack_top(uintptr_t stacktop)
37 {
38         core_stacktops[core_id()] = stacktop;
39 }
40
/* Returns this core's kernel stacktop (recorded by set_stack_top()).  The
 * assertion assumes we are currently executing in the top page of that stack:
 * sp rounded up to a page boundary must equal the recorded stacktop. */
uintptr_t get_stack_top(void)
{
	/* Read the current stack pointer straight out of the sp register. */
	register uintptr_t sp asm ("sp");
	uintptr_t stacktop = core_stacktops[core_id()];
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
49
/* Nothing to initialize on RISC-V; stub kept for the arch-independent
 * interface. */
void idt_init(void)
{
}
54
/* No sysenter-style fast path on RISC-V; stub kept for the arch-independent
 * interface. */
void sysenter_init(void)
{
}
59
/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
 * to it.  Expects IRQs to be off while cur_tf is being set (hence the warn):
 * an interrupt in the middle could observe a half-copied frame. */
static void
set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
{
	if (irq_is_enabled())
		warn("Turn off IRQs until cur_tf is set!");
	/* cur_tf should have been cleared the last time this core left a tf */
	assert(!pcpui->cur_tf);
	pcpui->actual_tf = *tf;
	pcpui->cur_tf = &pcpui->actual_tf;
}
71
72 static int
73 format_trapframe(trapframe_t *tf, char* buf, int bufsz)
74 {
75         // slightly hackish way to read out the instruction that faulted.
76         // not guaranteed to be right 100% of the time
77         uint32_t insn;
78         if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
79                 insn = -1;
80
81         int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
82                            tf, core_id());
83         static const char* regnames[] = {
84           "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
85           "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
86           "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
87           "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
88         };
89         
90         tf->gpr[0] = 0;
91         
92         for(int i = 0; i < 32; i+=4)
93         {
94                 for(int j = 0; j < 4; j++)
95                         len += snprintf(buf+len, bufsz-len,
96                                         "%s %016lx%c", regnames[i+j], tf->gpr[i+j], 
97                                         j < 3 ? ' ' : '\n');
98         }
99         len += snprintf(buf+len, bufsz-len,
100                         "sr %016lx pc %016lx va %016lx insn       %08x\n",
101                                         tf->sr, tf->epc, tf->badvaddr, insn);
102
103         buf[bufsz-1] = 0;
104         return len;
105 }
106
107 void
108 print_trapframe(trapframe_t* tf)
109 {
110         char buf[1024];
111         int len = format_trapframe(tf,buf,sizeof(buf));
112         cputbuf(buf,len);
113 }
/* If the trap interrupted a core spinning in cpu_halt() (epc lies between
 * cpu_halt and the after_cpu_halt linker/asm symbol), redirect epc so the core
 * resumes past the halt loop instead of re-halting.
 * NOTE(review): gpr[1] is ra per the regnames table in format_trapframe --
 * this returns to cpu_halt's caller; confirm against the asm. */
static void exit_halt_loop(trapframe_t* tf)
{
	extern char after_cpu_halt;
	if ((char*)tf->epc >= (char*)&cpu_halt && (char*)tf->epc < &after_cpu_halt)
		tf->epc = tf->gpr[1];
}
120
/* Assumes that any IPI you get is really a kernel message */
static void
handle_ipi(trapframe_t* tf)
{
	clear_ipi();
	poll_keyboard(); // keypresses can trigger IPIs

	/* From userspace: stash the tf so cur_tf is valid for the kmsg handlers.
	 * From the kernel: we may have interrupted cpu_halt(); unwind it. */
	if (!in_kernel(tf))
		set_current_tf(&per_cpu_info[core_id()], tf);
	else
		exit_halt_loop(tf);

	handle_kmsg_ipi(tf, 0);
}
135
/* Common endpoint for traps with no specific handler: kernel traps panic,
 * user traps dump diagnostics and destroy the process. */
static void
unhandled_trap(trapframe_t* state, const char* name)
{
	/* Serializes the diagnostic output across cores. */
	static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
	spin_lock(&screwup_lock);

	if(in_kernel(state))
	{
		/* panic() never returns; the lock is deliberately not released. */
		print_trapframe(state);
		panic("Unhandled trap in kernel!\nTrap type: %s", name);
	}
	else
	{
		char tf_buf[1024];
		format_trapframe(state, tf_buf, sizeof(tf_buf));

		warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
		backtrace();
		spin_unlock(&screwup_lock);

		/* A user trap implies a current process to kill. */
		assert(current);
		enable_irq();
		proc_destroy(current);
	}
}
161
162 static void
163 handle_timer_interrupt(trapframe_t* tf)
164 {
165         if (!in_kernel(tf))
166                 set_current_tf(&per_cpu_info[core_id()], tf);
167         else
168                 exit_halt_loop(tf);
169         
170         timer_interrupt(tf, NULL);
171 }
172
173 static void
174 handle_misaligned_fetch(trapframe_t* state)
175 {
176         unhandled_trap(state, "Misaligned Fetch");
177 }
178
179 static void
180 handle_misaligned_load(trapframe_t* state)
181 {
182         unhandled_trap(state, "Misaligned Load");
183 }
184
185 static void
186 handle_misaligned_store(trapframe_t* state)
187 {
188         unhandled_trap(state, "Misaligned Store");
189 }
190
191 static void
192 handle_fault_fetch(trapframe_t* state)
193 {
194         if(in_kernel(state))
195         {
196                 print_trapframe(state);
197                 panic("Instruction Page Fault in the Kernel at %p!", state->epc);
198         }
199
200         set_current_tf(&per_cpu_info[core_id()], state);
201
202         if(handle_page_fault(current, state->epc, PROT_EXEC))
203                 unhandled_trap(state, "Instruction Page Fault");
204 }
205
206 static void
207 handle_fault_load(trapframe_t* state)
208 {
209         if(in_kernel(state))
210         {
211                 print_trapframe(state);
212                 panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
213         }
214
215         set_current_tf(&per_cpu_info[core_id()], state);
216
217         if(handle_page_fault(current, state->badvaddr, PROT_READ))
218                 unhandled_trap(state, "Load Page Fault");
219 }
220
221 static void
222 handle_fault_store(trapframe_t* state)
223 {
224         if(in_kernel(state))
225         {
226                 print_trapframe(state);
227                 panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
228         }
229         
230         set_current_tf(&per_cpu_info[core_id()], state);
231
232         if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
233                 unhandled_trap(state, "Store Page Fault");
234 }
235
236 static void
237 handle_illegal_instruction(trapframe_t* state)
238 {
239         set_current_tf(&per_cpu_info[core_id()], state);
240
241         if (emulate_fpu(state) == 0)
242         {
243                 advance_pc(per_cpu_info[core_id()].cur_tf);
244                 return;
245         }
246
247         unhandled_trap(state, "Illegal Instruction");
248 }
249
/* FP-disabled trap: a user process touched the FPU for the first time.
 * Enable FP in its status register and retry the faulting instruction. */
static void
handle_fp_disabled(trapframe_t* tf)
{
	if(in_kernel(tf))
		panic("kernel executed an FP instruction!");

	tf->sr |= SR_EF;
	env_pop_tf(tf); /* We didn't save our TF, so don't use proc_restartcore */
}
259
/* Syscall trap: hand the user's syscall array off to the generic syscall
 * machinery.  Per the regnames table in format_trapframe, gpr[4]/gpr[5] are
 * the a0/a1 argument registers.
 * NOTE(review): presumably a0 = syscall array pointer, a1 = count -- confirm
 * against prep_syscalls and the userspace ABI. */
static void
handle_syscall(trapframe_t* state)
{
	uintptr_t a0 = state->gpr[4];
	uintptr_t a1 = state->gpr[5];

	/* Step past the syscall instruction before saving the tf, so the process
	 * resumes after it. */
	advance_pc(state);
	set_current_tf(&per_cpu_info[core_id()], state);
	enable_irq();
	prep_syscalls(current, (struct syscall*)a0, a1);
}
271
272 static void
273 handle_breakpoint(trapframe_t* state)
274 {
275         advance_pc(state);
276         monitor(state);
277 }
278
/* Main C trap entry: dispatches interrupts (negative cause) and synchronous
 * traps (non-negative cause) to their handlers, then returns to whatever was
 * running. */
void
handle_trap(trapframe_t* tf)
{
	/* Dispatch tables indexed by cause.  Designated initializers leave any
	 * unlisted cause NULL, which the asserts below catch. */
	static void (*const trap_handlers[])(trapframe_t*) = {
	  [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
	  [CAUSE_FAULT_FETCH] = handle_fault_fetch,
	  [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_FP_DISABLED] = handle_fp_disabled,
	  [CAUSE_SYSCALL] = handle_syscall,
	  [CAUSE_BREAKPOINT] = handle_breakpoint,
	  [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
	  [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
	  [CAUSE_FAULT_LOAD] = handle_fault_load,
	  [CAUSE_FAULT_STORE] = handle_fault_store,
	};

	static void (*const irq_handlers[])(trapframe_t*) = {
	  [IRQ_TIMER] = handle_timer_interrupt,
	  [IRQ_IPI] = handle_ipi,
	};

	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	/* A negative cause means an interrupt (top bit set); the low bits carry
	 * the IRQ number.  NOTE(review): the uint8_t truncation assumes IRQ
	 * numbers fit in 8 bits -- confirm against arch/trap.h. */
	if (tf->cause < 0)
	{
		uint8_t irq = tf->cause;
		assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
		       irq_handlers[irq]);
		/* Track per-core IRQ nesting across the handler. */
		inc_irq_depth(pcpui);
		irq_handlers[irq](tf);
		dec_irq_depth(pcpui);
	}
	else
	{
		assert(tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
		       trap_handlers[tf->cause]);
		/* Only traps taken while in the kernel count toward ktrap depth;
		 * user traps are the normal entry path. */
		if (in_kernel(tf)) {
			inc_ktrap_depth(pcpui);
			trap_handlers[tf->cause](tf);
			dec_ktrap_depth(pcpui);
		} else {
			trap_handlers[tf->cause](tf);
		}
	}

	/* Return to the current process, which should be runnable.  If we're the
	 * kernel, we should just return naturally.  Note that current and tf need
	 * to still be okay (might not be after blocking) */
	if (in_kernel(tf))
		env_pop_tf(tf);
	else
		proc_restartcore();
}
332
/* NMIs are not supported on this port; deliberately a no-op. */
void send_nmi(uint32_t os_coreid)
{
}