net: Use NULL to signal lack of promisc/multicast
[akaros.git] / kern / arch / riscv / trap.c
1 #include <arch/arch.h>
2 #include <assert.h>
3 #include <trap.h>
4 #include <arch/console.h>
5 #include <string.h>
6 #include <process.h>
7 #include <syscall.h>
8 #include <monitor.h>
9 #include <manager.h>
10 #include <stdio.h>
11 #include <smp.h>
12 #include <slab.h>
13 #include <mm.h>
14 #include <umem.h>
15 #include <pmap.h>
16
/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe.  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
/* Indexed by core_id(); written by set_stack_top(), read by get_stack_top(). */
uintptr_t core_stacktops[MAX_NUM_CORES] = {0xcafebabe, 0};
27
28 void
29 advance_pc(struct hw_trapframe *state)
30 {
31         state->epc += 4;
32 }
33
34 /* Set stacktop for the current core to be the stack the kernel will start on
35  * when trapping/interrupting from userspace */
36 void set_stack_top(uintptr_t stacktop)
37 {
38         core_stacktops[core_id()] = stacktop;
39 }
40
/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
	/* Bind 'sp' directly to the stack-pointer register so we can sanity-check
	 * that we are actually running on this core's registered kernel stack. */
	register uintptr_t sp asm ("sp");
	uintptr_t stacktop = core_stacktops[core_id()];
	/* Valid only while sp is within the top page of the stack (see note). */
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
49
/* No IDT-style table setup is needed on this arch; this stub exists so the
 * arch-independent init path has something to call. */
void idt_init(void)
{
}
54
55 /* Helper.  For now, this copies out the TF to pcpui, and sets cur_ctx to point
56  * to it. */
57 static void set_current_ctx_hw(struct per_cpu_info *pcpui,
58                                struct hw_trapframe *hw_tf)
59 {
60         if (irq_is_enabled())
61                 warn("Turn off IRQs until cur_ctx is set!");
62         assert(!pcpui->cur_ctx);
63         pcpui->actual_ctx.type = ROS_HW_CTX;
64         pcpui->actual_ctx.tf.hw_tf = *hw_tf;
65         pcpui->cur_ctx = &pcpui->actual_ctx;
66 }
67
68 static void set_current_ctx_sw(struct per_cpu_info *pcpui,
69                                struct sw_trapframe *sw_tf)
70 {
71         if (irq_is_enabled())
72                 warn("Turn off IRQs until cur_ctx is set!");
73         assert(!pcpui->cur_ctx);
74         pcpui->actual_ctx.type = ROS_SW_CTX;
75         pcpui->actual_ctx.tf.sw_tf = *sw_tf;
76         pcpui->cur_ctx = &pcpui->actual_ctx;
77 }
78
79 static int
80 format_trapframe(struct hw_trapframe *hw_tf, char* buf, int bufsz)
81 {
82         // slightly hackish way to read out the instruction that faulted.
83         // not guaranteed to be right 100% of the time
84         uint32_t insn;
85         if(!(current && !memcpy_from_user(current,&insn,(void*)hw_tf->epc,4)))
86                 insn = -1;
87
88         int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
89                            hw_tf, core_id());
90         static const char* regnames[] = {
91           "z ", "ra", "s0", "s1", "s2", "s3", "s4", "s5",
92           "s6", "s7", "s8", "s9", "sA", "sB", "sp", "tp",
93           "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5",
94           "a6", "a7", "a8", "a9", "aA", "aB", "aC", "aD"
95         };
96
97         hw_tf->gpr[0] = 0;
98
99         for(int i = 0; i < 32; i+=4)
100         {
101                 for(int j = 0; j < 4; j++)
102                         len += snprintf(buf+len, bufsz-len,
103                                         "%s %016lx%c", regnames[i+j], hw_tf->gpr[i+j],
104                                         j < 3 ? ' ' : '\n');
105         }
106         len += snprintf(buf+len, bufsz-len,
107                         "sr %016lx pc %016lx va %016lx insn       %08x\n",
108                                         hw_tf->sr, hw_tf->epc, hw_tf->badvaddr, insn);
109
110         buf[bufsz-1] = 0;
111         return len;
112 }
113
/* Formats hw_tf into a stack buffer and dumps it to the console. */
void print_trapframe(struct hw_trapframe *hw_tf)
{
	char buf[1024];
	int n = format_trapframe(hw_tf, buf, sizeof(buf));

	cputbuf(buf, n);
}
120
/* TODO: SW-context printing is not implemented on riscv yet. */
void print_swtrapframe(struct sw_trapframe *sw_tf)
{
	#warning "fix me"
}
125
/* TODO: VM-context printing is not implemented on riscv yet. */
void print_vmtrapframe(struct vm_trapframe *vm_tf)
{
	#warning "fix me"
}
130
/* If the interrupted PC lies inside cpu_halt(), fake a return from it by
 * resuming at the saved return address.  after_cpu_halt is an asm/linker
 * symbol marking the first instruction past cpu_halt() -- TODO confirm where
 * it is defined (likely entry/asm code). */
static void exit_halt_loop(struct hw_trapframe *hw_tf)
{
	extern char after_cpu_halt;
	/* epc in [cpu_halt, after_cpu_halt) means we were halted. */
	if ((char*)hw_tf->epc >= (char*)&cpu_halt &&
	    (char*)hw_tf->epc < &after_cpu_halt)
		hw_tf->epc = hw_tf->gpr[GPR_RA];
}
138
/* Feeds one character from the host console into the kernel console layer. */
static void handle_keypress(char c)
{
	#warning "fix me"
	/* TODO: does cons_init need to be before cons_add_char?  Also, do something
	 * with CTRL-G, Q, and B. */
	cons_add_char(c);

	cons_init();
}
148
149 static void handle_host_interrupt(struct hw_trapframe *hw_tf)
150 {
151         uintptr_t fh = mtpcr(PCR_FROMHOST, 0);
152         switch (fh >> 56)
153         {
154           case 0x00: return;
155           case 0x01: handle_keypress(fh); return;
156           default: assert(0);
157         }
158 }
159
/* Thin shim routing the timer IRQ into the generic timer path (no IRQ arg). */
static void handle_timer_interrupt(struct hw_trapframe *hw_tf)
{
	timer_interrupt(hw_tf, NULL);
}
164
/* Assumes that any IPI you get is really a kernel message */
static void handle_interprocessor_interrupt(struct hw_trapframe *hw_tf)
{
	/* Ack the IPI first, then run the kernel-message handler. */
	clear_ipi();
	handle_kmsg_ipi(hw_tf, 0);
}
171
/* Fatal-trap path.  A kernel trap panics; a user trap dumps the frame, warns,
 * and destroys the offending process.  The lock serializes the output when
 * multiple cores blow up at once. */
static void
unhandled_trap(struct hw_trapframe *state, const char* name)
{
	static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
	spin_lock(&screwup_lock);

	if(in_kernel(state))
	{
		print_trapframe(state);
		/* panic() does not return, so the lock is intentionally kept held. */
		panic("Unhandled trap in kernel!\nTrap type: %s", name);
	}
	else
	{
		char tf_buf[1024];
		format_trapframe(state, tf_buf, sizeof(tf_buf));

		warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
		backtrace();
		/* Release before destroying the process. */
		spin_unlock(&screwup_lock);

		assert(current);
		proc_destroy(current);
	}
}
196
/* Misaligned instruction fetches are not fixed up; treat them as fatal. */
static void handle_misaligned_fetch(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Fetch");
}
202
/* Misaligned loads are not emulated; treat them as fatal. */
static void handle_misaligned_load(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Load");
}
208
/* Misaligned stores are not emulated; treat them as fatal. */
static void handle_misaligned_store(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Store");
}
214
/* Instruction page fault.  Fatal in the kernel; user faults get a shot at
 * demand paging before the process is killed. */
static void
handle_fault_fetch(struct hw_trapframe *state)
{
	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Instruction Page Fault in the Kernel at %p!", state->epc);
	}

	set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
	/* Nonzero means the VM system couldn't satisfy the fault. */
	if(handle_page_fault(current, state->epc, PROT_EXEC))
		unhandled_trap(state, "Instruction Page Fault");
}
230
/* Load (read) page fault.  Fatal in the kernel; user faults get a shot at
 * demand paging before the process is killed. */
static void
handle_fault_load(struct hw_trapframe *state)
{
	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
	}

	set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
	/* Nonzero means the VM system couldn't satisfy the fault. */
	if(handle_page_fault(current, state->badvaddr, PROT_READ))
		unhandled_trap(state, "Load Page Fault");
}
246
247 static void
248 handle_fault_store(struct hw_trapframe *state)
249 {
250         if(in_kernel(state))
251         {
252                 print_trapframe(state);
253                 panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
254         }
255
256         set_current_ctx_hw(&per_cpu_info[core_id()], state);
257
258         if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
259                 unhandled_trap(state, "Store Page Fault");
260 }
261
262 static void
263 handle_illegal_instruction(struct hw_trapframe *state)
264 {
265         assert(!in_kernel(state));
266
267         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
268         set_current_ctx_hw(pcpui, state);
269         if (emulate_fpu(state) == 0)
270         {
271                 advance_pc(&pcpui->cur_ctx->tf.hw_tf);
272                 return;
273         }
274
275         unhandled_trap(state, "Illegal Instruction");
276 }
277
278 static void
279 handle_syscall(struct hw_trapframe *state)
280 {
281         uintptr_t a0 = state->gpr[GPR_A0];
282         uintptr_t a1 = state->gpr[GPR_A1];
283
284         advance_pc(state);
285         set_current_ctx_hw(&per_cpu_info[core_id()], state);
286         enable_irq();
287         prep_syscalls(current, (struct syscall*)a0, a1);
288 }
289
/* Breakpoint trap: step past the break instruction, then enter the monitor. */
static void handle_breakpoint(struct hw_trapframe *state)
{
	advance_pc(state);
	monitor(state);
}
296
297 void
298 handle_trap(struct hw_trapframe *hw_tf)
299 {
300         static void (*const trap_handlers[])(struct hw_trapframe *) = {
301           [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
302           [CAUSE_FAULT_FETCH] = handle_fault_fetch,
303           [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
304           [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
305           [CAUSE_SYSCALL] = handle_syscall,
306           [CAUSE_BREAKPOINT] = handle_breakpoint,
307           [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
308           [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
309           [CAUSE_FAULT_LOAD] = handle_fault_load,
310           [CAUSE_FAULT_STORE] = handle_fault_store,
311         };
312
313         static void (*const irq_handlers[])(struct hw_trapframe *) = {
314           [IRQ_TIMER] = handle_timer_interrupt,
315           [IRQ_HOST] = handle_host_interrupt,
316           [IRQ_IPI] = handle_interprocessor_interrupt,
317         };
318
319         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
320         if (hw_tf->cause < 0)
321         {
322                 uint8_t irq = hw_tf->cause;
323                 assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
324                        irq_handlers[irq]);
325
326                 if (in_kernel(hw_tf))
327                         exit_halt_loop(hw_tf);
328                 else
329                         set_current_ctx_hw(&per_cpu_info[core_id()], hw_tf);
330
331                 inc_irq_depth(pcpui);
332                 irq_handlers[irq](hw_tf);
333                 dec_irq_depth(pcpui);
334         }
335         else
336         {
337                 assert(hw_tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
338                        trap_handlers[hw_tf->cause]);
339                 if (in_kernel(hw_tf)) {
340                         inc_ktrap_depth(pcpui);
341                         trap_handlers[hw_tf->cause](hw_tf);
342                         dec_ktrap_depth(pcpui);
343                 } else {
344                         trap_handlers[hw_tf->cause](hw_tf);
345                 }
346                 #warning "if a trap wasn't handled fully, like an MCP pf, reflect it
347                 reflect_unhandled_trap(hw_tf->tf_trapno, hw_tf->tf_err, aux);
348         }
349
350         extern void pop_hw_tf(struct hw_trapframe *tf); /* in asm */
351         /* Return to the current process, which should be runnable.  If we're the
352          * kernel, we should just return naturally.  Note that current and tf need
353          * to still be okay (might not be after blocking) */
354         if (in_kernel(hw_tf))
355                 pop_hw_tf(hw_tf);
356         else
357                 proc_restartcore();
358 }
359
/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
	/* Fix: the old code used '__FUNCTION', which is not a defined identifier
	 * (the standard form is __func__). */
	printk("%s not implemented\n", __func__);
}
365
366 int register_irq(int irq, isr_t handler, void *irq_arg, uint32_t tbdf)
367 {
368         printk("%s not implemented\n", __FUNCTION);
369         return -1;
370 }
371
/* IRQ routing is not supported on riscv yet; always fails with -1.
 * Fix: '__FUNCTION' was not a defined identifier (use __func__). */
int route_irqs(int cpu_vec, int coreid)
{
	printk("%s not implemented\n", __func__);
	return -1;
}
377
/* Trap reflection to userspace is not implemented on riscv yet.
 * Fix: '__FUNCTION' was not a defined identifier (use __func__). */
void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
                              unsigned int err, unsigned long aux)
{
	printk("%s not implemented\n", __func__);
}