Stub out kernel support for VM contexts (XCC)
[akaros.git] / kern / arch / riscv / trap.c
1 #include <arch/arch.h>
2 #include <assert.h>
3 #include <trap.h>
4 #include <arch/console.h>
5 #include <console.h>
6 #include <string.h>
7 #include <process.h>
8 #include <syscall.h>
9 #include <monitor.h>
10 #include <manager.h>
11 #include <stdio.h>
12 #include <smp.h>
13 #include <slab.h>
14 #include <mm.h>
15 #include <umem.h>
16 #include <pmap.h>
17
/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract
 * the sizeof the trapframe).  Note, we need to have a junk value in the array
 * so that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd
 * in kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CORES] = {0xcafebabe, 0};
28
29 void
30 advance_pc(struct hw_trapframe *state)
31 {
32         state->epc += 4;
33 }
34
/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace.  Stored per-core in
 * core_stacktops, which get_stack_top() reads back. */
void set_stack_top(uintptr_t stacktop)
{
	core_stacktops[core_id()] = stacktop;
}
41
/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
	/* Bind 'sp' to the hardware stack pointer register so we can read the
	 * live value. */
	register uintptr_t sp asm ("sp");
	uintptr_t stacktop = core_stacktops[core_id()];
	/* Sanity check: the current sp, rounded up to a page boundary, must be
	 * exactly the registered stacktop for this core. */
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
50
/* No-op on riscv: there is no x86-style IDT to set up here; this stub exists
 * to satisfy the arch-independent init interface. */
void
idt_init(void)
{
}
55
56 /* Helper.  For now, this copies out the TF to pcpui, and sets cur_ctx to point
57  * to it. */
58 static void set_current_ctx_hw(struct per_cpu_info *pcpui,
59                                struct hw_trapframe *hw_tf)
60 {
61         if (irq_is_enabled())
62                 warn("Turn off IRQs until cur_ctx is set!");
63         assert(!pcpui->cur_ctx);
64         pcpui->actual_ctx.type = ROS_HW_CTX;
65         pcpui->actual_ctx.tf.hw_tf = *hw_tf;
66         pcpui->cur_ctx = &pcpui->actual_ctx;
67 }
68
69 static void set_current_ctx_sw(struct per_cpu_info *pcpui,
70                                struct sw_trapframe *sw_tf)
71 {
72         if (irq_is_enabled())
73                 warn("Turn off IRQs until cur_ctx is set!");
74         assert(!pcpui->cur_ctx);
75         pcpui->actual_ctx.type = ROS_SW_CTX;
76         pcpui->actual_ctx.tf.sw_tf = *sw_tf;
77         pcpui->cur_ctx = &pcpui->actual_ctx;
78 }
79
80 static int
81 format_trapframe(struct hw_trapframe *hw_tf, char* buf, int bufsz)
82 {
83         // slightly hackish way to read out the instruction that faulted.
84         // not guaranteed to be right 100% of the time
85         uint32_t insn;
86         if(!(current && !memcpy_from_user(current,&insn,(void*)hw_tf->epc,4)))
87                 insn = -1;
88
89         int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
90                            hw_tf, core_id());
91         static const char* regnames[] = {
92           "z ", "ra", "s0", "s1", "s2", "s3", "s4", "s5",
93           "s6", "s7", "s8", "s9", "sA", "sB", "sp", "tp",
94           "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5",
95           "a6", "a7", "a8", "a9", "aA", "aB", "aC", "aD"
96         };
97         
98         hw_tf->gpr[0] = 0;
99         
100         for(int i = 0; i < 32; i+=4)
101         {
102                 for(int j = 0; j < 4; j++)
103                         len += snprintf(buf+len, bufsz-len,
104                                         "%s %016lx%c", regnames[i+j], hw_tf->gpr[i+j], 
105                                         j < 3 ? ' ' : '\n');
106         }
107         len += snprintf(buf+len, bufsz-len,
108                         "sr %016lx pc %016lx va %016lx insn       %08x\n",
109                                         hw_tf->sr, hw_tf->epc, hw_tf->badvaddr, insn);
110
111         buf[bufsz-1] = 0;
112         return len;
113 }
114
/* Format hw_tf into a stack buffer, then dump it to the console. */
void print_trapframe(struct hw_trapframe *hw_tf)
{
	char buf[1024];

	cputbuf(buf, format_trapframe(hw_tf, buf, sizeof(buf)));
}
121
/* TODO: SW-context printing is not implemented for riscv yet. */
void print_swtrapframe(struct sw_trapframe *sw_tf)
{
	#warning "fix me"
}
126
/* TODO: VM-context printing is not implemented for riscv yet. */
void print_vmtrapframe(struct vm_trapframe *vm_tf)
{
	#warning "fix me"
}
131
/* If the trap interrupted the code between cpu_halt() and the asm label
 * after_cpu_halt, rewrite the saved PC to the caller's return address (ra),
 * so we don't fall back into the halt after handling the interrupt. */
static void exit_halt_loop(struct hw_trapframe *hw_tf)
{
	extern char after_cpu_halt;	/* asm label just past cpu_halt */
	if ((char*)hw_tf->epc >= (char*)&cpu_halt &&
	    (char*)hw_tf->epc < &after_cpu_halt)
		hw_tf->epc = hw_tf->gpr[GPR_RA];
}
139
140 static void handle_keypress(char c)
141 {
142         /* brho: not sure if this will work on riscv or not... */
143         #define capchar2ctl(x) ((x) - '@')
144         amr_t handler = c == capchar2ctl('G') ? __run_mon : __cons_add_char;
145         send_kernel_message(core_id(), handler, (long)&cons_buf, (long)c, 0,
146                             KMSG_ROUTINE);
147         cons_init();
148 }
149
150 static void handle_host_interrupt(struct hw_trapframe *hw_tf)
151 {
152         uintptr_t fh = mtpcr(PCR_FROMHOST, 0);
153         switch (fh >> 56)
154         {
155           case 0x00: return;
156           case 0x01: handle_keypress(fh); return;
157           default: assert(0);
158         }
159 }
160
/* Timer IRQ: hand off to the arch-independent timer path. */
static void handle_timer_interrupt(struct hw_trapframe *hw_tf)
{
	timer_interrupt(hw_tf, NULL);
}
165
/* Assumes that any IPI you get is really a kernel message */
static void handle_interprocessor_interrupt(struct hw_trapframe *hw_tf)
{
	clear_ipi();			/* ack the IPI first */
	handle_kmsg_ipi(hw_tf, 0);	/* then run queued kernel messages */
}
172
/* Last resort for traps with no fixup path: panic if the kernel trapped,
 * otherwise print diagnostics and destroy the current process.  The lock
 * serializes diagnostic output across cores; the kernel path never unlocks
 * it, but panic() does not return anyway. */
static void
unhandled_trap(struct hw_trapframe *state, const char* name)
{
	static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
	spin_lock(&screwup_lock);

	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Unhandled trap in kernel!\nTrap type: %s", name);
	}
	else
	{
		char tf_buf[1024];
		format_trapframe(state, tf_buf, sizeof(tf_buf));

		warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
		backtrace();
		/* Unlock before proc_destroy, which may not return here. */
		spin_unlock(&screwup_lock);

		assert(current);
		proc_destroy(current);
	}
}
197
/* Misaligned instruction fetch: no fixup path, treat as fatal. */
static void handle_misaligned_fetch(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Fetch");
}
203
/* Misaligned load: no fixup path, treat as fatal. */
static void handle_misaligned_load(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Load");
}
209
/* Misaligned store: no fixup path, treat as fatal. */
static void handle_misaligned_store(struct hw_trapframe *state)
{
	unhandled_trap(state, "Misaligned Store");
}
215
/* Instruction page fault.  Fatal if the kernel faulted; for userspace, let
 * the VM system try to handle it, and kill the process if that fails. */
static void
handle_fault_fetch(struct hw_trapframe *state)
{
	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Instruction Page Fault in the Kernel at %p!", state->epc);
	}

	set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
	if(handle_page_fault(current, state->epc, PROT_EXEC))
		unhandled_trap(state, "Instruction Page Fault");
}
231
/* Load page fault.  Fatal if the kernel faulted; for userspace, let the VM
 * system try to handle it, and kill the process if that fails. */
static void
handle_fault_load(struct hw_trapframe *state)
{
	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
	}

	set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
	if(handle_page_fault(current, state->badvaddr, PROT_READ))
		unhandled_trap(state, "Load Page Fault");
}
247
/* Store page fault.  Fatal if the kernel faulted; for userspace, let the VM
 * system try to handle it, and kill the process if that fails.
 * NOTE(review): the fetch/load siblings carry a #warning about EAGAIN
 * meaning "reflect the fault"; presumably the same applies here — verify. */
static void
handle_fault_store(struct hw_trapframe *state)
{
	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
	}

	set_current_ctx_hw(&per_cpu_info[core_id()], state);

	if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
		unhandled_trap(state, "Store Page Fault");
}
262
263 static void
264 handle_illegal_instruction(struct hw_trapframe *state)
265 {
266         assert(!in_kernel(state));
267
268         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
269         set_current_ctx_hw(pcpui, state);
270         if (emulate_fpu(state) == 0)
271         {
272                 advance_pc(&pcpui->cur_ctx->tf.hw_tf);
273                 return;
274         }
275
276         unhandled_trap(state, "Illegal Instruction");
277 }
278
/* Syscall trap: a0 holds the user pointer to the syscall array, a1 the count
 * (per the values passed to prep_syscalls below). */
static void
handle_syscall(struct hw_trapframe *state)
{
	uintptr_t a0 = state->gpr[GPR_A0];
	uintptr_t a1 = state->gpr[GPR_A1];

	/* Step past the syscall insn *before* saving the ctx, so restarting
	 * the context doesn't re-issue the syscall. */
	advance_pc(state);
	set_current_ctx_hw(&per_cpu_info[core_id()], state);
	enable_irq();
	prep_syscalls(current, (struct syscall*)a0, a1);
}
290
/* Breakpoint trap: step past the break instruction, then drop into the
 * kernel monitor. */
static void
handle_breakpoint(struct hw_trapframe *state)
{
	advance_pc(state);
	monitor(state);
}
297
298 void
299 handle_trap(struct hw_trapframe *hw_tf)
300 {
301         static void (*const trap_handlers[])(struct hw_trapframe *) = {
302           [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
303           [CAUSE_FAULT_FETCH] = handle_fault_fetch,
304           [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
305           [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
306           [CAUSE_SYSCALL] = handle_syscall,
307           [CAUSE_BREAKPOINT] = handle_breakpoint,
308           [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
309           [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
310           [CAUSE_FAULT_LOAD] = handle_fault_load,
311           [CAUSE_FAULT_STORE] = handle_fault_store,
312         };
313
314         static void (*const irq_handlers[])(struct hw_trapframe *) = {
315           [IRQ_TIMER] = handle_timer_interrupt,
316           [IRQ_HOST] = handle_host_interrupt,
317           [IRQ_IPI] = handle_interprocessor_interrupt,
318         };
319         
320         struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
321         if (hw_tf->cause < 0)
322         {
323                 uint8_t irq = hw_tf->cause;
324                 assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
325                        irq_handlers[irq]);
326
327                 if (in_kernel(hw_tf))
328                         exit_halt_loop(hw_tf);
329                 else
330                         set_current_ctx_hw(&per_cpu_info[core_id()], hw_tf);
331
332                 inc_irq_depth(pcpui);
333                 irq_handlers[irq](hw_tf);
334                 dec_irq_depth(pcpui);
335         }
336         else
337         {
338                 assert(hw_tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
339                        trap_handlers[hw_tf->cause]);
340                 if (in_kernel(hw_tf)) {
341                         inc_ktrap_depth(pcpui);
342                         trap_handlers[hw_tf->cause](hw_tf);
343                         dec_ktrap_depth(pcpui);
344                 } else {
345                         trap_handlers[hw_tf->cause](hw_tf);
346                 }
347                 #warning "if a trap wasn't handled fully, like an MCP pf, reflect it
348                 reflect_unhandled_trap(hw_tf->tf_trapno, hw_tf->tf_err, aux);
349         }
350         
351         extern void pop_hw_tf(struct hw_trapframe *tf); /* in asm */
352         /* Return to the current process, which should be runnable.  If we're the
353          * kernel, we should just return naturally.  Note that current and tf need
354          * to still be okay (might not be after blocking) */
355         if (in_kernel(hw_tf))
356                 pop_hw_tf(hw_tf);
357         else
358                 proc_restartcore();
359 }
360
361 /* We don't have NMIs now. */
362 void send_nmi(uint32_t os_coreid)
363 {
364         printk("%s not implemented\n", __FUNCTION);
365 }
366
367 int register_irq(int irq, isr_t handler, void *irq_arg, uint32_t tbdf)
368 {
369         printk("%s not implemented\n", __FUNCTION);
370         return -1;
371 }
372
/* IRQ routing is not implemented on riscv yet; always fails.
 * Fixed __FUNCTION (not an identifier) to the C99-standard __func__. */
int route_irqs(int cpu_vec, int coreid)
{
	printk("%s not implemented\n", __func__);
	return -1;
}
378
/* Trap reflection to userspace is not implemented on riscv yet.
 * Fixed __FUNCTION (not an identifier) to the C99-standard __func__. */
void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
                              unsigned int err, unsigned long aux)
{
	printk("%s not implemented\n", __func__);
}