Fix spurious panics on RISC-V keystrokes
[akaros.git] / kern / arch / riscv / trap.c
1 #include <arch/arch.h>
2 #include <assert.h>
3 #include <arch/trap.h>
4 #include <arch/console.h>
5 #include <console.h>
6 #include <string.h>
7 #include <process.h>
8 #include <syscall.h>
9 #include <monitor.h>
10 #include <manager.h>
11 #include <stdio.h>
12 #include <smp.h>
13 #include <slab.h>
14 #include <mm.h>
15 #include <umem.h>
16 #include <pmap.h>
17
/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe.  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
/* 0xcafebabe is only a placeholder to force this into .data (see above). */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
28
29 void
30 advance_pc(trapframe_t* state)
31 {
32         state->epc += 4;
33 }
34
35 /* Set stacktop for the current core to be the stack the kernel will start on
36  * when trapping/interrupting from userspace */
37 void set_stack_top(uintptr_t stacktop)
38 {
39         core_stacktops[core_id()] = stacktop;
40 }
41
/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
	/* Read the live stack pointer straight out of the sp register. */
	register uintptr_t sp asm ("sp");
	uintptr_t stacktop = core_stacktops[core_id()];
	/* Sanity check: we must actually be running on the recorded stack
	 * (only valid within its top page, per the note above). */
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
50
/* Nothing to do on this port; stub kept so arch-independent init code can
 * call it unconditionally. */
void
idt_init(void)
{
}
55
/* Nothing to do on this port; stub kept for the arch-independent init path. */
void
sysenter_init(void)
{
}
60
/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
 * to it. */
static void
set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
{
	/* cur_tf must be set atomically wrt interrupts; warn (not panic) so we
	 * still make progress if a caller forgot. */
	if (irq_is_enabled())
		warn("Turn off IRQs until cur_tf is set!");
	/* Don't clobber a TF someone already stashed for this core. */
	assert(!pcpui->cur_tf);
	pcpui->actual_tf = *tf;	/* copy by value into per-cpu storage */
	pcpui->cur_tf = &pcpui->actual_tf;
}
72
73 static int
74 format_trapframe(trapframe_t *tf, char* buf, int bufsz)
75 {
76         // slightly hackish way to read out the instruction that faulted.
77         // not guaranteed to be right 100% of the time
78         uint32_t insn;
79         if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
80                 insn = -1;
81
82         int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
83                            tf, core_id());
84         static const char* regnames[] = {
85           "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
86           "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
87           "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
88           "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
89         };
90         
91         tf->gpr[0] = 0;
92         
93         for(int i = 0; i < 32; i+=4)
94         {
95                 for(int j = 0; j < 4; j++)
96                         len += snprintf(buf+len, bufsz-len,
97                                         "%s %016lx%c", regnames[i+j], tf->gpr[i+j], 
98                                         j < 3 ? ' ' : '\n');
99         }
100         len += snprintf(buf+len, bufsz-len,
101                         "sr %016lx pc %016lx va %016lx insn       %08x\n",
102                                         tf->sr, tf->epc, tf->badvaddr, insn);
103
104         buf[bufsz-1] = 0;
105         return len;
106 }
107
108 void
109 print_trapframe(trapframe_t* tf)
110 {
111         char buf[1024];
112         int len = format_trapframe(tf,buf,sizeof(buf));
113         cputbuf(buf,len);
114 }
115
/* If the trap interrupted cpu_halt(), move the saved PC out of the halt
 * sequence so we don't immediately halt again on return.  after_cpu_halt is
 * a symbol placed just past the halt code; gpr[1] is ra (see the regnames
 * table in format_trapframe), so this effectively returns from cpu_halt. */
static void exit_halt_loop(trapframe_t* tf)
{
	extern char after_cpu_halt;
	if ((char*)tf->epc >= (char*)&cpu_halt && (char*)tf->epc < &after_cpu_halt)
		tf->epc = tf->gpr[1];
}
122
123 static void handle_keypress(char c)
124 {
125         amr_t handler = c == 'G' ? __run_mon : __cons_add_char;
126         send_kernel_message(core_id(), handler, (long)&cons_buf, (long)c, 0,
127                             KMSG_ROUTINE);
128         cons_init();
129 }
130
131 static void handle_host_interrupt(trapframe_t* tf)
132 {
133         uintptr_t fh = mtpcr(PCR_FROMHOST, 0);
134         switch (fh >> 56)
135         {
136           case 0x00: return;
137           case 0x01: handle_keypress(fh); return;
138           default: assert(0);
139         }
140 }
141
142 static void handle_timer_interrupt(trapframe_t* tf)
143 {
144         timer_interrupt(tf, NULL);
145 }
146
/* Assumes that any IPI you get is really a kernel message */
static void handle_interprocessor_interrupt(trapframe_t* tf)
{
	/* Ack the IPI before draining, so a new one can be latched while we
	 * process the kernel messages. */
	clear_ipi();
	handle_kmsg_ipi(tf, 0);
}
153
/* Last resort for traps with no recovery path.  Kernel traps panic; user
 * traps dump state and kill the offending process. */
static void
unhandled_trap(trapframe_t* state, const char* name)
{
	/* Serializes the diagnostic output if multiple cores trap at once. */
	static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
	spin_lock(&screwup_lock);

	if(in_kernel(state))
	{
		/* panic never returns, so the lock is deliberately not released. */
		print_trapframe(state);
		panic("Unhandled trap in kernel!\nTrap type: %s", name);
	}
	else
	{
		char tf_buf[1024];
		format_trapframe(state, tf_buf, sizeof(tf_buf));

		warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
		backtrace();
		/* Drop the lock before proc_destroy, which may not return soon. */
		spin_unlock(&screwup_lock);

		assert(current);
		/* NOTE(review): presumably proc_destroy requires IRQs enabled --
		 * confirm against the proc code. */
		enable_irq();
		proc_destroy(current);
	}
}
179
180 static void
181 handle_misaligned_fetch(trapframe_t* state)
182 {
183         unhandled_trap(state, "Misaligned Fetch");
184 }
185
186 static void
187 handle_misaligned_load(trapframe_t* state)
188 {
189         unhandled_trap(state, "Misaligned Load");
190 }
191
192 static void
193 handle_misaligned_store(trapframe_t* state)
194 {
195         unhandled_trap(state, "Misaligned Store");
196 }
197
198 static void
199 handle_fault_fetch(trapframe_t* state)
200 {
201         if(in_kernel(state))
202         {
203                 print_trapframe(state);
204                 panic("Instruction Page Fault in the Kernel at %p!", state->epc);
205         }
206
207         set_current_tf(&per_cpu_info[core_id()], state);
208
209         if(handle_page_fault(current, state->epc, PROT_EXEC))
210                 unhandled_trap(state, "Instruction Page Fault");
211 }
212
213 static void
214 handle_fault_load(trapframe_t* state)
215 {
216         if(in_kernel(state))
217         {
218                 print_trapframe(state);
219                 panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
220         }
221
222         set_current_tf(&per_cpu_info[core_id()], state);
223
224         if(handle_page_fault(current, state->badvaddr, PROT_READ))
225                 unhandled_trap(state, "Load Page Fault");
226 }
227
228 static void
229 handle_fault_store(trapframe_t* state)
230 {
231         if(in_kernel(state))
232         {
233                 print_trapframe(state);
234                 panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
235         }
236
237         set_current_tf(&per_cpu_info[core_id()], state);
238
239         if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
240                 unhandled_trap(state, "Store Page Fault");
241 }
242
243 static void
244 handle_illegal_instruction(trapframe_t* state)
245 {
246         assert(!in_kernel(state));
247
248         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
249         set_current_tf(pcpui, state);
250         if (emulate_fpu(state) == 0)
251         {
252                 advance_pc(pcpui->cur_tf);
253                 return;
254         }
255
256         unhandled_trap(state, "Illegal Instruction");
257 }
258
259 static void
260 handle_fp_disabled(trapframe_t* tf)
261 {
262         if(in_kernel(tf))
263                 panic("kernel executed an FP instruction!");
264
265         tf->sr |= SR_EF;
266         env_pop_tf(tf); /* We didn't save our TF, so don't use proc_restartcore */
267 }
268
/* Syscall trap from userspace.  gpr[4]/gpr[5] are a0/a1 per the regnames
 * table in format_trapframe; presumably a0 carries the syscall-struct
 * pointer and a1 the count -- confirm against the syscall ABI. */
static void
handle_syscall(trapframe_t* state)
{
	uintptr_t a0 = state->gpr[4];
	uintptr_t a1 = state->gpr[5];

	/* Advance the PC before saving the TF, so the process resumes after
	 * the syscall instruction rather than re-executing it. */
	advance_pc(state);
	set_current_tf(&per_cpu_info[core_id()], state);
	enable_irq();
	prep_syscalls(current, (struct syscall*)a0, a1);
}
280
281 static void
282 handle_breakpoint(trapframe_t* state)
283 {
284         advance_pc(state);
285         monitor(state);
286 }
287
/* Common trap/interrupt entry point: dispatch on tf->cause, then resume
 * whatever was interrupted (kernel pops the TF directly; user goes through
 * proc_restartcore). */
void
handle_trap(trapframe_t* tf)
{
	/* Designated-initializer dispatch tables; a NULL slot means the cause
	 * is unexpected (caught by the asserts below). */
	static void (*const trap_handlers[])(trapframe_t*) = {
	  [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
	  [CAUSE_FAULT_FETCH] = handle_fault_fetch,
	  [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_FP_DISABLED] = handle_fp_disabled,
	  [CAUSE_SYSCALL] = handle_syscall,
	  [CAUSE_BREAKPOINT] = handle_breakpoint,
	  [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
	  [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
	  [CAUSE_FAULT_LOAD] = handle_fault_load,
	  [CAUSE_FAULT_STORE] = handle_fault_store,
	};

	static void (*const irq_handlers[])(trapframe_t*) = {
	  [IRQ_TIMER] = handle_timer_interrupt,
	  [IRQ_HOST] = handle_host_interrupt,
	  [IRQ_IPI] = handle_interprocessor_interrupt,
	};

	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	/* A negative cause marks an interrupt; the low bits are the IRQ number.
	 * NOTE(review): the uint8_t narrowing assumes IRQ numbers fit in 8
	 * bits -- confirm against arch/trap.h. */
	if (tf->cause < 0)
	{
		uint8_t irq = tf->cause;
		assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
		       irq_handlers[irq]);

		/* If we interrupted the halt loop, fix up the PC so we don't halt
		 * again; otherwise stash the user TF for the handler. */
		if (in_kernel(tf))
			exit_halt_loop(tf);
		else
			set_current_tf(&per_cpu_info[core_id()], tf);

		inc_irq_depth(pcpui);
		irq_handlers[irq](tf);
		dec_irq_depth(pcpui);
	}
	else
	{
		assert(tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
		       trap_handlers[tf->cause]);
		/* ktrap depth is only tracked for traps taken from the kernel. */
		if (in_kernel(tf)) {
			inc_ktrap_depth(pcpui);
			trap_handlers[tf->cause](tf);
			dec_ktrap_depth(pcpui);
		} else {
			trap_handlers[tf->cause](tf);
		}
	}

	/* Return to the current process, which should be runnable.  If we're the
	 * kernel, we should just return naturally.  Note that current and tf need
	 * to still be okay (might not be after blocking) */
	if (in_kernel(tf))
		env_pop_tf(tf);
	else
		proc_restartcore();
}
348
/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
	/* Intentionally a no-op on this port. */
}