/* kern/arch/sparc/trap.c */
#include <arch/arch.h>
#include <assert.h>
#include <arch/trap.h>
#include <string.h>
#include <process.h>
#include <syscall.h>
#include <monitor.h>
#include <manager.h>
#include <stdio.h>
#include <smp.h>
#include <slab.h>
#include <mm.h>
#include <ros/mman.h>
#include <pmap.h>

#ifdef __SHARC__
#pragma nosharc
#endif

#ifdef __DEPUTY__
#pragma nodeputy
#endif

struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];

/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc,
                             TV(a0t) arg0, TV(a1t) arg1, TV(a2t) arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
        k_msg->srcid = core_id();
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        send_ipi(dst);
        return 0;
}
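
/* Usage sketch (illustrative only; the names below are hypothetical): a
 * handler with the amr_t signature and a matching send.  The handler runs
 * on the destination core out of handle_ipi() below, with that core's
 * trapframe. */
#if 0
static void example_kmsg_handler(trapframe_t *tf, uint32_t srcid,
                                 TV(a0t) a0, TV(a1t) a1, TV(a2t) a2)
{
        printk("core %d: kmsg from core %d\n", core_id(), srcid);
}

/* from anywhere in the kernel, with dst a valid core id: */
send_kernel_message(dst, example_kmsg_handler, 0, 0, 0, KMSG_ROUTINE);
#endif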

void
advance_pc(trapframe_t* state)
{
        state->pc = state->npc;
        state->npc += 4;
}

/* No IDT on sparc; stub to satisfy the arch-independent interface. */
void
idt_init(void)
{
}

/* No sysenter on sparc; syscalls arrive via trap (see handle_syscall). */
void
sysenter_init(void)
{
}

static int
format_trapframe(trapframe_t *tf, char* buf, int bufsz)
{
        // slightly hackish way to read out the instruction that faulted.
        // not guaranteed to be right 100% of the time
        uint32_t insn;
        if(!current || memcpy_from_user(current,&insn,(void*)tf->pc,4))
                insn = -1;

        int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
                           tf, core_id());

        for(int i = 0; i < 8; i++)
        {
                len += snprintf(buf+len,bufsz-len,
                                "  g%d   0x%08x  o%d   0x%08x"
                                "  l%d   0x%08x  i%d   0x%08x\n",
                                i,tf->gpr[i],i,tf->gpr[i+8],
                                i,tf->gpr[i+16],i,tf->gpr[i+24]);
        }

        len += snprintf(buf+len,bufsz-len,
                        "  psr  0x%08x  pc   0x%08x  npc  0x%08x  insn 0x%08x\n",
                        tf->psr,tf->pc,tf->npc,insn);
        len += snprintf(buf+len,bufsz-len,
                        "  y    0x%08x  fsr  0x%08x  far  0x%08x  tbr  0x%08x\n",
                        tf->y,tf->fault_status,tf->fault_addr,tf->tbr);
        len += snprintf(buf+len,bufsz-len,
                        "  timestamp  %21lld\n",tf->timestamp);

        return len;
}

void
print_trapframe(trapframe_t* tf)
{
        char buf[1024];
        int len = format_trapframe(tf,buf,sizeof(buf));
        cputbuf(buf,len);
}

#define TRAPNAME_MAX    32

static char*
get_trapname(uint8_t tt, char buf[TRAPNAME_MAX])
{
        static const char* trapnames[] = {
                [0x00] = "reset",
                [0x01] = "instruction access exception",
                [0x02] = "illegal instruction",
                [0x03] = "privileged instruction",
                [0x04] = "floating point disabled",
                [0x05] = "window overflow",
                [0x06] = "window underflow",
                [0x07] = "memory address not aligned",
                [0x08] = "floating point exception",
                [0x09] = "data access exception",
                [0x20] = "register access error",
                [0x21] = "instruction access error",
                [0x24] = "coprocessor disabled",
                [0x25] = "unimplemented FLUSH",
                [0x28] = "coprocessor exception",
                [0x29] = "data access error",
                [0x2A] = "division by zero",
                [0x2B] = "data store error",
                [0x2C] = "data MMU miss",
                [0x3C] = "instruction MMU miss"
        };

        if(tt >= 0x80)
                snprintf(buf,TRAPNAME_MAX,"user trap 0x%02x",tt);
        else if(tt >= 0x10 && tt < 0x20)
                snprintf(buf,TRAPNAME_MAX,"interrupt 0x%x",tt-0x10);
        else if(tt >= sizeof(trapnames)/sizeof(trapnames[0]) || !trapnames[tt])
                snprintf(buf,TRAPNAME_MAX,"(unknown trap 0x%02x)",tt);
        else
        {
                strncpy(buf,trapnames[tt],TRAPNAME_MAX);
                buf[TRAPNAME_MAX-1] = 0;
        }

        return buf;
}
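
/* Usage sketch: naming the trap currently in %tbr.  The tt field occupies
 * bits 11:4 of the TBR, which is how unhandled_trap() below extracts it:
 *
 *      char buf[TRAPNAME_MAX];
 *      uint8_t tt = (tf->tbr >> 4) & 0xFF;
 *      printk("trap: %s\n", get_trapname(tt, buf));
 */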

/* Helper function.  Returns 0 if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
void handle_ipi(trapframe_t* tf)
{
        if (!in_kernel(tf))
                set_current_tf(tf);
        else if((void*)tf->pc == &cpu_halt) // break out of the cpu_halt loop
                advance_pc(tf);

        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                              &myinfo->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* make sure an IPI is pending if we have more work */
                        /* technically, we don't need to lock when checking */
                        if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                                send_ipi(core_id());
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors.*/
void process_routine_kmsg(void)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        while (1) {
                /* normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* make sure an IPI is pending if we have more work */
                if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                        send_ipi(core_id());
                /* Execute the kernel message */
                assert(msg_cp.pc);
                msg_cp.pc(current_tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1,
                          msg_cp.arg2);
        }
}
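
/* Usage sketch (hypothetical caller): code that spins waiting for an event
 * should drain routine messages so senders don't back up behind this core.
 * work_available() is illustrative:
 *
 *      while (!work_available()) {
 *              process_routine_kmsg();
 *              cpu_relax();
 *      }
 */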

void
unhandled_trap(trapframe_t* state)
{
        char buf[TRAPNAME_MAX];
        uint32_t trap_type = (state->tbr >> 4) & 0xFF;
        get_trapname(trap_type,buf);

        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);

        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s",buf);
        }
        else
        {
                char tf_buf[1024];
                int tf_len = format_trapframe(state,tf_buf,sizeof(tf_buf));

                warn("Unhandled trap in user!\nTrap type: %s\n%s",buf,tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);

                assert(current);
                proc_incref(current, 1);
                proc_destroy(current);

                panic("I shouldn't have gotten here!");
        }
}

static trapframe_t*
stack_fucked(trapframe_t* state)
{
        warn("You just got stack fucked!");
        extern char tflush1, tflush2;
        if(state->pc == (uint32_t)&tflush1 || state->pc == (uint32_t)&tflush2)
                return (trapframe_t*)(bootstacktop - core_id()*KSTKSIZE
                                                   - sizeof(trapframe_t));
        return state;
}

/* A fault during a window fill: retag the trap and hand it to the real
 * handler.  0x070 puts trap type 0x07 (memory address not aligned) into the
 * tt field, which occupies bits 11:4 of the TBR. */
void
fill_misaligned(trapframe_t* state)
{
        state = stack_fucked(state);
        state->tbr = (state->tbr & ~0xFFF) | 0x070;
        address_unaligned(state);
}

/* Same idea: 0x090 retags this as trap type 0x09 (data access exception). */
void
fill_pagefault(trapframe_t* state)
{
        state = stack_fucked(state);
        state->tbr = (state->tbr & ~0xFFF) | 0x090;
        data_access_exception(state);
}

void
spill_misaligned(trapframe_t* state)
{
        fill_misaligned(state);
}

void
spill_pagefault(trapframe_t* state)
{
        fill_pagefault(state);
}

void
address_unaligned(trapframe_t* state)
{
        unhandled_trap(state);
}

void
instruction_access_exception(trapframe_t* state)
{
        if(in_kernel(state) || handle_page_fault(current,state->pc,PROT_EXEC))
                unhandled_trap(state);
}

void
data_access_exception(trapframe_t* state)
{
        int prot = (state->fault_status & MMU_FSR_WR) ? PROT_WRITE : PROT_READ;

        if(in_kernel(state) || handle_page_fault(current,state->fault_addr,prot))
                unhandled_trap(state);
}

void
illegal_instruction(trapframe_t* state)
{
        unhandled_trap(state);
}

void
real_fp_exception(trapframe_t* state, ancillary_state_t* sillystate)
{
        unhandled_trap(state);
}

void
fp_exception(trapframe_t* state)
{
        ancillary_state_t sillystate;
        save_fp_state(&sillystate);

        // since our FP HW exception behavior is sketchy, re-execute
        // any faulting FP instruction in SW, which may call
        // real_fp_exception above
        emulate_fpu(state,&sillystate);

        restore_fp_state(&sillystate);
}

void
fp_disabled(trapframe_t* state)
{
        if(in_kernel(state))
                panic("kernel executed an FP instruction!");

        state->psr |= PSR_EF;
}

void
handle_pop_tf(trapframe_t* state)
{
        set_current_tf(state);

        trapframe_t tf;
        if(memcpy_from_user(current,&tf,(void*)state->gpr[8],sizeof(tf)))
                proc_destroy(current);

        proc_secure_trapframe(&tf);
        set_current_tf(&tf);
        proc_restartcore(current,&tf);
}

void
handle_set_tf(trapframe_t* state)
{
        advance_pc(state);
        if(memcpy_to_user(current,(void*)state->gpr[8],state,sizeof(*state)))
                proc_destroy(current);
}

void
handle_syscall(trapframe_t* state)
{
        uint32_t num = state->gpr[1];   // syscall number arrives in %g1
        uint32_t a1 = state->gpr[8];    // arguments arrive in %o0-%o4
        uint32_t a2 = state->gpr[9];
        uint32_t a3 = state->gpr[10];
        uint32_t a4 = state->gpr[11];
        uint32_t a5 = state->gpr[12];

        advance_pc(state);
        enable_irq();

        set_current_tf(state);

        // syscall code wants an edible reference for current
        proc_incref(current, 1);
        state->gpr[8] = syscall(current,num,a1,a2,a3,a4,a5);    // return in %o0
        proc_decref(current, 1);

        proc_restartcore(current,state);
}
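
/* User-side view implied by the register usage above (sketch only; the
 * actual software-trap instruction the user ABI uses is not shown in this
 * file): load the syscall number into %g1 and up to five arguments into
 * %o0-%o4, trap into the kernel, then read the return value from %o0. */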

/* Rotate through all the register windows with SAVE, forcing window-overflow
 * traps that flush each window to the stack, then RESTORE back to where we
 * started.  The counter lives in global registers, which are shared across
 * windows and so survive the rotation. */
void
flush_windows()
{
        register int foo asm("g1");
        register int nwin asm("g2");
        extern int NWINDOWS;

        nwin = NWINDOWS;
        foo = nwin;

        // decrement and SAVE until we've cycled through every window
        asm volatile ("1: deccc %0; bne,a 1b; save %%sp,-64,%%sp"
                      : "=r"(foo) : "r"(foo));

        foo = nwin;
        // RESTORE the same number of times to get back to our own window
        asm volatile ("1: deccc %0; bne,a 1b; restore"
                      : "=r"(foo) : "r"(foo));
}

void
handle_flushw(trapframe_t* state)
{
        // don't actually need to do anything here.
        // trap_entry flushes user windows to the stack.
        advance_pc(state);
}

void
handle_breakpoint(trapframe_t* state)
{
        advance_pc(state);
        monitor(state);
}