Kernel message parameters are now longs
kern/arch/riscv/trap.c (akaros.git)
#include <arch/arch.h>
#include <assert.h>
#include <arch/trap.h>
#include <string.h>
#include <process.h>
#include <syscall.h>
#include <monitor.h>
#include <manager.h>
#include <stdio.h>
#include <smp.h>
#include <slab.h>
#include <mm.h>
#include <ros/mman.h>
#include <umem.h>
#include <pmap.h>

/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * size of the trapframe).  Note that we need a junk value in the array so that
 * this is NOT part of the BSS.  If it were in the BSS, it would get 0'd in
 * kernel_init(), which runs after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};

struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];

/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
                             long arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
        k_msg->srcid = core_id();
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        /* if we're sending a routine message locally, we don't want/need an IPI */
        if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
                send_ipi(dst);
        return 0;
}
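
/* Example (a sketch; 'example_handler' and 'dst_coreid' are hypothetical, not
 * part of this file): handlers follow the signature used at the k_msg->pc()
 * call sites below, with the long args introduced by this change, and get
 * queued with send_kernel_message().
 *
 *      static void example_handler(trapframe_t *tf, uint32_t srcid,
 *                                  long a0, long a1, long a2)
 *      {
 *              printk("kmsg from core %u: %ld\n", srcid, a0);
 *      }
 *
 *      send_kernel_message(dst_coreid, example_handler, 42, 0, 0, KMSG_ROUTINE);
 */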

/* Skip over the 4-byte instruction that trapped (e.g. a syscall or breakpoint). */
void
advance_pc(trapframe_t* state)
{
        state->epc += 4;
}

/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace */
void set_stack_top(uintptr_t stacktop)
{
        core_stacktops[core_id()] = stacktop;
}

/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
        uintptr_t sp, stacktop;
        stacktop = core_stacktops[core_id()];
        asm volatile("move %0,$sp" : "=r"(sp));
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
}

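/* RISC-V has no IDT or SYSENTER MSR to set up; these are empty stubs, presumably
 * kept only to satisfy the arch-independent init interface. */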
void
idt_init(void)
{
}

void
sysenter_init(void)
{
}

/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use it.
 * Eventually, we ought to do this in trap_entry.S.  Honestly, do whatever you
 * want with this.  The **tf is for convenience in x86. */
static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
{
        pcpui->actual_tf = **tf;
        pcpui->cur_tf = &pcpui->actual_tf;
        *tf = &pcpui->actual_tf;
}

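/* Format a human-readable dump of *tf into buf (at most bufsz bytes); returns
 * the length of the formatted text. */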
static int
format_trapframe(trapframe_t *tf, char* buf, int bufsz)
{
        // slightly hackish way to read out the instruction that faulted.
        // not guaranteed to be right 100% of the time
        uint32_t insn;
        if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
                insn = -1;

        int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
                           tf, core_id());
        static const char* regnames[] = {
          "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
          "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
          "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
          "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
        };

        tf->gpr[0] = 0;

        for(int i = 0; i < 32; i+=4)
        {
                for(int j = 0; j < 4; j++)
                        len += snprintf(buf+len, bufsz-len,
                                        "%s %016lx%c", regnames[i+j], tf->gpr[i+j],
                                        j < 3 ? ' ' : '\n');
        }
        len += snprintf(buf+len, bufsz-len,
                        "sr %016lx pc %016lx va %016lx insn %08x\n", tf->sr, tf->epc,
                        tf->badvaddr, (uint32_t)tf->insn);

        return len;
}

void
print_trapframe(trapframe_t* tf)
{
        char buf[1024];
        int len = format_trapframe(tf,buf,sizeof(buf));
        cputbuf(buf,len);
}

/* Helper function.  Returns 0 if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
static void
handle_ipi(trapframe_t* tf)
{
        clear_ipi();

        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        if (!in_kernel(tf))
                set_current_tf(pcpui, &tf);
        else if((void*)tf->epc == &cpu_halt) // break out of the cpu_halt loop
                advance_pc(tf);

        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                              &myinfo->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* make sure an IPI is pending if we have more work */
                        /* technically, we don't need to lock when checking */
                        if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                                send_ipi(core_id());
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors.*/
void process_routine_kmsg(struct trapframe *tf)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        /* If we were told what our TF was, use that.  o/w, go with current_tf. */
        tf = tf ? tf : current_tf;
        while (1) {
                /* normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message. (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* make sure an IPI is pending if we have more work */
                if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                        send_ipi(core_id());
                /* Execute the kernel message */
                assert(msg_cp.pc);
                msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
        }
}

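/* Common path for traps we don't (or can't) handle: panic if the trap came from
 * the kernel, otherwise dump state, backtrace, and destroy the current process. */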
static void
unhandled_trap(trapframe_t* state, const char* name)
{
        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);

        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s", name);
        }
        else
        {
                char tf_buf[1024];
                int tf_len = format_trapframe(state, tf_buf, sizeof(tf_buf));

                warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);

                assert(current);
                proc_incref(current, 1);
                proc_destroy(current);

                panic("I shouldn't have gotten here!");
        }
}

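/* Timer tick: just forward to timer_interrupt(). */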
static void
handle_timer_interrupt(trapframe_t* state)
{
        timer_interrupt(state, NULL);
}

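/* Walk the pending-interrupt bits in the cause register (CAUSE_IP) and dispatch
 * each set bit to its handler; only the timer and IPI IRQs are wired up here. */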
static void
handle_interrupt(trapframe_t* state)
{
        typedef void (*trap_handler)(trapframe_t*);

        const static trap_handler trap_handlers[NIRQ] = {
          [TIMER_IRQ] = handle_timer_interrupt,
          [IPI_IRQ] = handle_ipi,
        };

        uintptr_t interrupts = (state->cause & CAUSE_IP) >> CAUSE_IP_SHIFT;

        for(uintptr_t i = 0; interrupts; interrupts >>= 1, i++)
        {
                if(interrupts & 1)
                {
                        if(trap_handlers[i])
                                trap_handlers[i](state);
                        else
                        {
                                char name[32];
                                snprintf(name, sizeof(name), "Bad Interrupt %d", (int)i);
                                unhandled_trap(state, name);
                        }
                }
        }
}

static void
handle_misaligned_fetch(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Fetch");
}

static void
handle_misaligned_load(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Load");
}

static void
handle_misaligned_store(trapframe_t* state)
{
        unhandled_trap(state, "Misaligned Store");
}

static void
handle_fault_fetch(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Instruction Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Instruction Page Fault");
}

static void
handle_fault_load(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Load Page Fault");
}

static void
handle_fault_store(trapframe_t* state)
{
        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
        }

        if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
                unhandled_trap(state, "Store Page Fault");
}

static void
handle_illegal_instruction(trapframe_t* state)
{
        unhandled_trap(state, "Illegal Instruction");
}

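/* First FP use traps here with the FPU off; setting SR_EF turns it on so the
 * faulting instruction can be retried.  (Lazy FP enabling; saving/restoring FP
 * state is presumably handled elsewhere.) */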
static void
handle_fp_disabled(trapframe_t* state)
{
        if(in_kernel(state))
                panic("kernel executed an FP instruction!");

        state->sr |= SR_EF;
}

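/* Syscall trap.  a0 (gpr[4]) carries a user pointer to an array of struct
 * syscall and a1 (gpr[5]) a count (an assumption about the prep_syscalls()
 * convention); we skip the syscall instruction, save the current TF, queue the
 * syscalls, and restart the core. */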
389 handle_syscall(trapframe_t* state)
390 {
391         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
392         uintptr_t a0 = state->gpr[4];
393         uintptr_t a1 = state->gpr[5];
394
395         advance_pc(state);
396         enable_irq();
397         struct per_cpu_info* coreinfo = &per_cpu_info[core_id()];
398
399         set_current_tf(pcpui, &state);
400
401         prep_syscalls(current, (struct syscall*)a0, a1);
402
403         proc_restartcore();
404 }
405
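/* Breakpoint: step past the break instruction and drop into the kernel monitor. */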
static void
handle_breakpoint(trapframe_t* state)
{
        advance_pc(state);
        monitor(state);
}

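/* Main trap dispatch, reached from the assembly entry code: look up the handler
 * for the exception code in the cause register, run it, then pop the trapframe. */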
void
handle_trap(trapframe_t* tf)
{
        typedef void (*trap_handler)(trapframe_t*);

        const static trap_handler trap_handlers[NUM_CAUSES] = {
          [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
          [CAUSE_FAULT_FETCH] = handle_fault_fetch,
          [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
          [CAUSE_FP_DISABLED] = handle_fp_disabled,
          [CAUSE_INTERRUPT] = handle_interrupt,
          [CAUSE_SYSCALL] = handle_syscall,
          [CAUSE_BREAKPOINT] = handle_breakpoint,
          [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
          [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
          [CAUSE_FAULT_LOAD] = handle_fault_load,
          [CAUSE_FAULT_STORE] = handle_fault_store,
        };

        int exccode = (tf->cause & CAUSE_EXCCODE) >> CAUSE_EXCCODE_SHIFT;
        assert(exccode < NUM_CAUSES && trap_handlers[exccode]);

        trap_handlers[exccode](tf);

        env_pop_tf(tf);
}

/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
}