/* kern/arch/riscv/trap.c
 * RISC-V trap and interrupt handling, including kernel context (IRQ, etc)
 * tracking. */
1 #include <arch/arch.h>
2 #include <assert.h>
3 #include <arch/trap.h>
4 #include <string.h>
5 #include <process.h>
6 #include <syscall.h>
7 #include <monitor.h>
8 #include <manager.h>
9 #include <stdio.h>
10 #include <smp.h>
11 #include <slab.h>
12 #include <mm.h>
13 #include <umem.h>
14 #include <pmap.h>
15
/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe.  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
26
27 void
28 advance_pc(trapframe_t* state)
29 {
30         state->epc += 4;
31 }
32
33 /* Set stacktop for the current core to be the stack the kernel will start on
34  * when trapping/interrupting from userspace */
35 void set_stack_top(uintptr_t stacktop)
36 {
37         core_stacktops[core_id()] = stacktop;
38 }
39
/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
	/* Bind directly to the stack-pointer register so we can sanity-check
	 * that we are actually running on the recorded kernel stack. */
	register uintptr_t sp asm ("sp");
	uintptr_t stacktop = core_stacktops[core_id()];
	/* Rounding sp up to a page boundary yields the stack top only if we are
	 * within the top page (see comment above). */
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
48
/* No IDT-style setup is needed on RISC-V; kept for the arch interface. */
void idt_init(void)
{
	/* nothing to do */
}
53
/* No sysenter-style setup is needed on RISC-V; kept for the arch interface. */
void sysenter_init(void)
{
	/* nothing to do */
}
58
/* Helper.  For now, this copies out the TF to pcpui, and sets cur_tf to point
 * to it. */
static void
set_current_tf(struct per_cpu_info *pcpui, struct trapframe *tf)
{
	/* cur_tf must be installed with IRQs off; an interrupt here could see a
	 * half-set cur_tf.  Warn rather than panic so we still make progress. */
	if (irq_is_enabled())
		warn("Turn off IRQs until cur_tf is set!");
	/* cur_tf should have been cleared before we trapped back in. */
	assert(!pcpui->cur_tf);
	/* Copy first, then publish the pointer to the per-cpu copy. */
	pcpui->actual_tf = *tf;
	pcpui->cur_tf = &pcpui->actual_tf;
}
70
71 static int
72 format_trapframe(trapframe_t *tf, char* buf, int bufsz)
73 {
74         // slightly hackish way to read out the instruction that faulted.
75         // not guaranteed to be right 100% of the time
76         uint32_t insn;
77         if(!(current && !memcpy_from_user(current,&insn,(void*)tf->epc,4)))
78                 insn = -1;
79
80         int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
81                            tf, core_id());
82         static const char* regnames[] = {
83           "z ", "ra", "v0", "v1", "a0", "a1", "a2", "a3",
84           "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
85           "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3",
86           "s4", "s5", "s6", "s7", "s8", "fp", "sp", "tp"
87         };
88         
89         tf->gpr[0] = 0;
90         
91         for(int i = 0; i < 32; i+=4)
92         {
93                 for(int j = 0; j < 4; j++)
94                         len += snprintf(buf+len, bufsz-len,
95                                         "%s %016lx%c", regnames[i+j], tf->gpr[i+j], 
96                                         j < 3 ? ' ' : '\n');
97         }
98         len += snprintf(buf+len, bufsz-len,
99                         "sr %016lx pc %016lx va %016lx insn       %08x\n",
100                                         tf->sr, tf->epc, tf->badvaddr, insn);
101
102         buf[bufsz-1] = 0;
103         return len;
104 }
105
106 void
107 print_trapframe(trapframe_t* tf)
108 {
109         char buf[1024];
110         int len = format_trapframe(tf,buf,sizeof(buf));
111         cputbuf(buf,len);
112 }
/* If the interrupted PC is inside cpu_halt()'s halt loop, redirect it to the
 * halt loop's return address so the core resumes past the halt.
 * after_cpu_halt is a label exported by the asm implementing cpu_halt. */
static void exit_halt_loop(trapframe_t* tf)
{
	extern char after_cpu_halt;
	/* gpr[1] is ra per the regnames table in format_trapframe; assumes
	 * cpu_halt has not touched ra by this point. */
	if ((char*)tf->epc >= (char*)&cpu_halt && (char*)tf->epc < &after_cpu_halt)
		tf->epc = tf->gpr[1];
}
119
120 /* Assumes that any IPI you get is really a kernel message */
121 static void
122 handle_ipi(trapframe_t* tf)
123 {
124
125         if (!in_kernel(tf))
126                 set_current_tf(&per_cpu_info[core_id()], tf);
127         else
128                 exit_halt_loop(tf);
129         
130         clear_ipi();
131
132         handle_kmsg_ipi(tf, 0);
133 }
134
135 static void
136 unhandled_trap(trapframe_t* state, const char* name)
137 {
138         static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
139         spin_lock(&screwup_lock);
140
141         if(in_kernel(state))
142         {
143                 print_trapframe(state);
144                 panic("Unhandled trap in kernel!\nTrap type: %s", name);
145         }
146         else
147         {
148                 char tf_buf[1024];
149                 format_trapframe(state, tf_buf, sizeof(tf_buf));
150
151                 warn("Unhandled trap in user!\nTrap type: %s\n%s", name, tf_buf);
152                 backtrace();
153                 spin_unlock(&screwup_lock);
154
155                 assert(current);
156                 enable_irq();
157                 proc_destroy(current);
158         }
159 }
160
161 static void
162 handle_timer_interrupt(trapframe_t* tf)
163 {
164         if (!in_kernel(tf))
165                 set_current_tf(&per_cpu_info[core_id()], tf);
166         else
167                 exit_halt_loop(tf);
168         
169         timer_interrupt(tf, NULL);
170 }
171
172 static void
173 handle_misaligned_fetch(trapframe_t* state)
174 {
175         unhandled_trap(state, "Misaligned Fetch");
176 }
177
178 static void
179 handle_misaligned_load(trapframe_t* state)
180 {
181         unhandled_trap(state, "Misaligned Load");
182 }
183
184 static void
185 handle_misaligned_store(trapframe_t* state)
186 {
187         unhandled_trap(state, "Misaligned Store");
188 }
189
190 static void
191 handle_fault_fetch(trapframe_t* state)
192 {
193         if(in_kernel(state))
194         {
195                 print_trapframe(state);
196                 panic("Instruction Page Fault in the Kernel at %p!", state->epc);
197         }
198
199         set_current_tf(&per_cpu_info[core_id()], state);
200
201         if(handle_page_fault(current, state->epc, PROT_EXEC))
202                 unhandled_trap(state, "Instruction Page Fault");
203 }
204
205 static void
206 handle_fault_load(trapframe_t* state)
207 {
208         if(in_kernel(state))
209         {
210                 print_trapframe(state);
211                 panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
212         }
213
214         set_current_tf(&per_cpu_info[core_id()], state);
215
216         if(handle_page_fault(current, state->badvaddr, PROT_READ))
217                 unhandled_trap(state, "Load Page Fault");
218 }
219
220 static void
221 handle_fault_store(trapframe_t* state)
222 {
223         if(in_kernel(state))
224         {
225                 print_trapframe(state);
226                 panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
227         }
228         
229         set_current_tf(&per_cpu_info[core_id()], state);
230
231         if(handle_page_fault(current, state->badvaddr, PROT_WRITE))
232                 unhandled_trap(state, "Store Page Fault");
233 }
234
235 static void
236 handle_illegal_instruction(trapframe_t* state)
237 {
238         set_current_tf(&per_cpu_info[core_id()], state);
239
240         if (emulate_fpu(state) == 0)
241         {
242                 advance_pc(per_cpu_info[core_id()].cur_tf);
243                 return;
244         }
245
246         unhandled_trap(state, "Illegal Instruction");
247 }
248
249 static void
250 handle_fp_disabled(trapframe_t* tf)
251 {
252         if(in_kernel(tf))
253                 panic("kernel executed an FP instruction!");
254
255         tf->sr |= SR_EF;
256         env_pop_tf(tf); /* We didn't save our TF, so don't use proc_restartcore */
257 }
258
259 static void
260 handle_syscall(trapframe_t* state)
261 {
262         uintptr_t a0 = state->gpr[4];
263         uintptr_t a1 = state->gpr[5];
264
265         advance_pc(state);
266         set_current_tf(&per_cpu_info[core_id()], state);
267         enable_irq();
268         prep_syscalls(current, (struct syscall*)a0, a1);
269 }
270
271 static void
272 handle_breakpoint(trapframe_t* state)
273 {
274         advance_pc(state);
275         monitor(state);
276 }
277
/* Top-level trap entry: dispatch to a per-cause handler, then return to
 * whatever context was interrupted. */
void
handle_trap(trapframe_t* tf)
{
	/* Synchronous traps, indexed by tf->cause. */
	static void (*const trap_handlers[])(trapframe_t*) = {
	  [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
	  [CAUSE_FAULT_FETCH] = handle_fault_fetch,
	  [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
	  [CAUSE_FP_DISABLED] = handle_fp_disabled,
	  [CAUSE_SYSCALL] = handle_syscall,
	  [CAUSE_BREAKPOINT] = handle_breakpoint,
	  [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
	  [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
	  [CAUSE_FAULT_LOAD] = handle_fault_load,
	  [CAUSE_FAULT_STORE] = handle_fault_store,
	};

	/* Asynchronous interrupts, indexed by IRQ number. */
	static void (*const irq_handlers[])(trapframe_t*) = {
	  [IRQ_TIMER] = handle_timer_interrupt,
	  [IRQ_IPI] = handle_ipi,
	};
	
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	/* NOTE(review): interrupts appear to be flagged by the sign bit of
	 * cause, which requires cause to be a signed field — confirm against
	 * the trapframe definition and the cause-register encoding. */
	if (tf->cause < 0)
	{
		/* Deliberately truncates cause to its low 8 bits; assumes the IRQ
		 * number lives there — TODO confirm against the cause encoding. */
		uint8_t irq = tf->cause;
		assert(irq < sizeof(irq_handlers)/sizeof(irq_handlers[0]) &&
		       irq_handlers[irq]);
		inc_irq_depth(pcpui);
		irq_handlers[irq](tf);
		dec_irq_depth(pcpui);
	}
	else
	{
		assert(tf->cause < sizeof(trap_handlers)/sizeof(trap_handlers[0]) &&
		       trap_handlers[tf->cause]);
		/* Only track trap depth for traps taken from the kernel. */
		if (in_kernel(tf)) {
			inc_ktrap_depth(pcpui);
			trap_handlers[tf->cause](tf);
			dec_ktrap_depth(pcpui);
		} else {
			trap_handlers[tf->cause](tf);
		}
	}
	
	/* Return to the current process, which should be runnable.  If we're the
	 * kernel, we should just return naturally.  Note that current and tf need
	 * to still be okay (might not be after blocking) */
	if (in_kernel(tf))
		env_pop_tf(tf);
	else
		proc_restartcore();
}
331
/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
	/* intentionally a no-op */
}
335 }