Fixed RISC-V trap entry
[akaros.git] kern/arch/riscv/trap.c
#include <arch/arch.h>
#include <assert.h>
#include <arch/trap.h>
#include <string.h>
#include <process.h>
#include <syscall.h>
#include <monitor.h>
#include <manager.h>
#include <stdio.h>
#include <smp.h>
#include <slab.h>
#include <mm.h>
#include <umem.h>
#include <pmap.h>

/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe).  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
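/* For example (sketch only, not used by this file): the trapframe for a trap
 * from userspace would sit just below that stacktop, roughly at
 *     trapframe_t *tf = (trapframe_t*)(core_stacktops[core_id()] - sizeof(trapframe_t));
 */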

struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];

/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
                             long arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
        k_msg->srcid = core_id();
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        /* if we're sending a routine message locally, we don't want/need an IPI */
        if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
                send_ipi(dst);
        return 0;
}

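/* Bump the saved PC past the trapping instruction; base RISC-V instructions
 * are 4 bytes wide. */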
void
advance_pc(trapframe_t* state)
{
        state->epc += 4;
}

/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace */
void set_stack_top(uintptr_t stacktop)
{
        core_stacktops[core_id()] = stacktop;
}

/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
        register uintptr_t sp asm ("sp");
        uintptr_t stacktop = core_stacktops[core_id()];
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
}

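/* No IDT or sysenter MSRs on RISC-V; these stubs just satisfy the
 * arch-independent init interface. */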
void
idt_init(void)
{
}

void
sysenter_init(void)
{
}

/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
 * to it. */
static void
set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
{
        if (irq_is_enabled())
                warn("Turn off IRQs until cur_tf is set!");
        assert(!pcpui->cur_tf);
        pcpui->actual_tf = *tf;
        pcpui->cur_tf = &pcpui->actual_tf;
}

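/* Pretty-print a trapframe into buf (at most bufsz bytes); returns the number
 * of characters written. */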
static int
format_trapframe(trapframe_t *tf, char* buf, int bufsz)
{
        // slightly hackish way to read out the instruction that faulted.
        // not guaranteed to be right 100% of the time
        uint32_t insn;
        if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
                insn = -1;

        int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
                           tf, core_id());
        static const char* regnames[] = {
          "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
          "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
          "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
          "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
        };

        tf->gpr[0] = 0;

        for(int i = 0; i < 32; i+=4)
        {
                for(int j = 0; j < 4; j++)
                        len += snprintf(buf+len, bufsz-len,
                                        "%s %016lx%c", regnames[i+j], tf->gpr[i+j],
                                        j < 3 ? ' ' : '\n');
        }
        len += snprintf(buf+len, bufsz-len,
                        "sr %016lx pc %016lx va %016lx insn %08x\n", tf->sr, tf->epc,
                        tf->badvaddr, insn);

        return len;
}

void
print_trapframe(trapframe_t* tf)
{
        char buf[1024];
        int len = format_trapframe(tf,buf,sizeof(buf));
        cputbuf(buf,len);
}

/* Helper function.  Returns NULL if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
static void
handle_ipi(trapframe_t* tf)
{
        clear_ipi();

        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        if (!in_kernel(tf))
                set_current_tf(pcpui, tf);
        else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
                advance_pc(tf);

        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                              &myinfo->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* make sure an IPI is pending if we have more work */
                        /* technically, we don't need to lock when checking */
                        if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                                send_ipi(core_id());
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors. */
void
process_routine_kmsg(struct trapframe *tf)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        /* If we were told what our TF was, use that.  o/w, go with current_tf. */
        tf = tf ? tf : current_tf;
        while (1) {
                /* normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message. (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* make sure an IPI is pending if we have more work */
                if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                        send_ipi(core_id());
                /* Execute the kernel message */
                assert(msg_cp.pc);
                msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
        }
}

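/* Common endpoint for traps nobody claims: panic if the trap came from the
 * kernel, otherwise print the frame, kill the current process, and restart
 * the core. */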
static void
unhandled_trap(trapframe_t* state, const char* name)
{
        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);

        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s", name);
        }
        else
        {
                char tf_buf[1024];
                int tf_len = format_trapframe(state, tf_buf, sizeof(tf_buf));

                warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);

                assert(current);
                proc_destroy(current);
                /* Not sure if RISCV has a central point that would run proc_restartcore
                 */
                proc_restartcore();
        }
}

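/* Thin wrapper so the timer IRQ matches the trap_handler signature used in
 * handle_interrupt's dispatch table. */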
static void
handle_timer_interrupt(trapframe_t* state)
{
        timer_interrupt(state, NULL);
}

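/* Walk the pending-interrupt bits in the cause register and dispatch each one
 * to its registered handler. */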
static void
handle_interrupt(trapframe_t* state)
{
        typedef void (*trap_handler)(trapframe_t*);

        static const trap_handler trap_handlers[NIRQ] = {
          [TIMER_IRQ] = handle_timer_interrupt,
          [IPI_IRQ] = handle_ipi,
        };

        uintptr_t interrupts = (state->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;

        for(uintptr_t i = 0; interrupts; interrupts >>= 1, i++)
        {
                if(interrupts & 1)
                {
                        if(trap_handlers[i])
                                trap_handlers[i](state);
                        else
                        {
                                char name[32];
                                snprintf(name, sizeof(name), "Bad Interrupt %d", (int)i);
                                unhandled_trap(state, name);
                        }
                }
        }
}

static void
handle_misaligned_fetch(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Fetch");
}

static void
handle_misaligned_load(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Load");
}

static void
handle_misaligned_store(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Store");
}

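/* Page faults: fatal if taken while in the kernel; for user faults, let the VM
 * system try to resolve them and treat anything it can't fix as unhandled. */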
static void
handle_fault_fetch(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Instruction Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Instruction Page Fault");
}

static void
handle_fault_load(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Load Page Fault");
}

static void
handle_fault_store(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
                unhandled_trap(state, "Store Page Fault");
}

static void
handle_illegal_instruction(trapframe_t* state)
{
        unhandled_trap(state, "Illegal Instruction");
}

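/* FP instructions trap until first use; enable the FPU for this context by
 * setting SR_EF in the saved status register, then the instruction retries. */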
static void
handle_fp_disabled(trapframe_t* state)
{
        if(in_kernel(state))
                panic("kernel executed an FP instruction!");

        state->sr |= SR_EF;
}

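/* Syscall trap: a0 holds the user's syscall struct pointer and a1 the count.
 * Move past the trapping instruction, save the TF, and queue the syscalls. */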
static void
handle_syscall(trapframe_t* state)
{
        uintptr_t a0 = state->gpr[4];
        uintptr_t a1 = state->gpr[5];

        advance_pc(state);
        set_current_tf(&per_cpu_info[core_id()], state);
        enable_irq();
        prep_syscalls(current, (struct syscall*)a0, a1);
}

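/* Skip over the breakpoint instruction and drop into the kernel monitor. */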
static void
handle_breakpoint(trapframe_t* state)
{
        advance_pc(state);
        monitor(state);
}

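/* Top-level trap dispatcher, reached from the low-level trap entry with the
 * saved trapframe.  Routes on the exception code in the cause register. */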
void
handle_trap(trapframe_t* tf)
{
        static void (*const trap_handlers[NUM_CAUSES])(trapframe_t*) = {
          [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
          [CAUSE_FAULT_FETCH] = handle_fault_fetch,
          [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_FP_DISABLED] = handle_fp_disabled,
          [CAUSE_INTERRUPT] = handle_interrupt,
          [CAUSE_SYSCALL] = handle_syscall,
          [CAUSE_BREAKPOINT] = handle_breakpoint,
          [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
          [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
          [CAUSE_FAULT_LOAD] = handle_fault_load,
          [CAUSE_FAULT_STORE] = handle_fault_store,
        };

        int exccode = (tf->cause & CAUSE_EXCCODE) >> CAUSE_EXCCODE_SHIFT;
        assert(exccode < NUM_CAUSES && trap_handlers[exccode]);
        trap_handlers[exccode](tf);

        /* Return to the current process, which should be runnable.  If we're the
         * kernel, we should just return naturally.  Note that current and tf need
         * to still be okay (might not be after blocking) */
        if (in_kernel(tf))
                env_pop_tf(tf);
        else
                proc_restartcore();
}

/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
}