/* akaros.git: kern/arch/sparc/trap.c */
#include <arch/arch.h>
#include <assert.h>
#include <arch/trap.h>
#include <string.h>
#include <process.h>
#include <syscall.h>
#include <monitor.h>
#include <manager.h>
#include <stdio.h>
#include <smp.h>
#include <slab.h>
#include <mm.h>
#include <ros/mman.h>
#include <pmap.h>

#ifdef __SHARC__
#pragma nosharc
#endif

#ifdef __DEPUTY__
#pragma nodeputy
#endif

struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

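/* Per-core static message buffers and their busy locks.  These look
 * vestigial now that messages come from kernel_msg_cache; presumably kept
 * for older code paths that still expect them. */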
spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];

/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc,
                             TV(a0t) arg0, TV(a1t) arg1, TV(a2t) arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
        k_msg->srcid = core_id();
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        send_ipi(dst);
        return 0;
}

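/* SPARC traps save both pc and npc (the delay-slot successor).  To step past
 * the trapping instruction, resume at npc and advance npc by one word. */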
void
advance_pc(trapframe_t* state)
{
        state->pc = state->npc;
        state->npc += 4;
}

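/* The IDT and sysenter are x86 concepts; on SPARC the trap table is installed
 * elsewhere (in the low-level entry code), so these arch-interface hooks are
 * empty stubs. */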
void
idt_init(void)
{
}

void
sysenter_init(void)
{
}

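/* Dumps all 32 window registers: gpr[0..7] are the globals, gpr[8..15] the
 * outs, gpr[16..23] the locals, and gpr[24..31] the ins. */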
void
(print_trapframe)(trapframe_t *tf)
{
        int i, len;
        char buf[1024];

        len = snprintf(buf,sizeof(buf),"TRAP frame at %p on core %d\n",
                       tf, core_id());

        for(i = 0; i < 8; i++)
        {
                len += snprintf(buf+len,sizeof(buf)-len,
                                "  g%d   0x%08x  o%d   0x%08x"
                                "  l%d   0x%08x  i%d   0x%08x\n",
                                i,tf->gpr[i],i,tf->gpr[i+8],
                                i,tf->gpr[i+16],i,tf->gpr[i+24]);
        }

        len += snprintf(buf+len,sizeof(buf)-len,
                        "  psr  0x%08x  pc   0x%08x  npc  0x%08x  wim  0x%08x\n",
                        tf->psr,tf->pc,tf->npc,tf->wim);
        len += snprintf(buf+len,sizeof(buf)-len,
                        "  y    0x%08x  insn 0x%08x  fsr  0x%08x  far  0x%08x\n",
                        tf->y,tf->pc_insn,tf->fault_status,tf->fault_addr);
        len += snprintf(buf+len,sizeof(buf)-len,
                        "  timestamp  %21lld\n",tf->timestamp);

        cprintf("%s",buf);
}

#define TRAPNAME_MAX    32

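/* Maps a SPARC V8 trap type (the tt field, tbr bits 11:4) to a human-readable
 * name.  tt values >= 0x80 are software traps from userspace, 0x10-0x1F are
 * interrupt levels, and the rest come from the table below. */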
static char*
get_trapname(uint8_t tt, char buf[TRAPNAME_MAX])
{
        static const char* trapnames[] = {
                [0x00] "reset",
                [0x01] "instruction access exception",
                [0x02] "illegal instruction",
                [0x03] "privileged instruction",
                [0x04] "floating point disabled",
                [0x05] "window overflow",
                [0x06] "window underflow",
                [0x07] "memory address not aligned",
                [0x08] "floating point exception",
                [0x09] "data access exception",
                [0x20] "register access error",
                [0x21] "instruction access error",
                [0x24] "coprocessor disabled",
                [0x25] "unimplemented FLUSH",
                [0x28] "coprocessor exception",
                [0x29] "data access error",
                [0x2A] "division by zero",
                [0x2B] "data store error",
                [0x2C] "data MMU miss",
                [0x3C] "instruction MMU miss"
        };

        if(tt >= 0x80)
                snprintf(buf,TRAPNAME_MAX,"user trap 0x%02x",tt);
        else if(tt >= 0x10 && tt < 0x20)
                snprintf(buf,TRAPNAME_MAX,"interrupt 0x%x",tt-0x10);
        else if(tt >= sizeof(trapnames)/sizeof(trapnames[0]) || !trapnames[tt])
                snprintf(buf,TRAPNAME_MAX,"(unknown trap 0x%02x)",tt);
        else
        {
                strncpy(buf,trapnames[tt],TRAPNAME_MAX);
                buf[TRAPNAME_MAX-1] = 0;
        }

        return buf;
}

/* Helper function.  Returns NULL if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
void handle_ipi(trapframe_t* tf)
{
        if (!in_kernel(tf))
                set_current_tf(tf);
        else if((void*)tf->pc == &cpu_halt) // break out of the cpu_halt loop
                advance_pc(tf);

        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                              &myinfo->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* make sure an IPI is pending if we have more work */
                        /* technically, we don't need to lock when checking */
                        if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                                send_ipi(core_id());
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors.*/
void process_routine_kmsg(void)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        while (1) {
                /* normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message. (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* make sure an IPI is pending if we have more work */
                if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                        send_ipi(core_id());
                /* Execute the kernel message */
                assert(msg_cp.pc);
                msg_cp.pc(current_tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1,
                          msg_cp.arg2);
        }
}

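/* Catch-all for traps we don't handle.  The trap type lives in bits 11:4 of
 * tbr.  A kernel fault is fatal; a user fault dumps state and kills the
 * current process. */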
void
unhandled_trap(trapframe_t* state)
{
        char buf[TRAPNAME_MAX];
        uint32_t trap_type = (state->tbr >> 4) & 0xFF;
        get_trapname(trap_type,buf);

        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s",buf);
        }
        else
        {
                warn("Unhandled trap in user!\nTrap type: %s",buf);
                print_trapframe(state);
                backtrace();

                assert(current);
                proc_incref(current, 1);
                proc_destroy(current);

                panic("I shouldn't have gotten here!");
        }
}

static void
stack_fucked(trapframe_t* state)
{
        // see if the problem arose when flushing out
        // windows during handle_trap
        extern uint32_t tflush;
        if(state->pc == (uint32_t)&tflush)
        {
                // the trap happened while flushing out windows.
                // hope this happened in the user, or else we're hosed...
                state = (trapframe_t*)(bootstacktop-SIZEOF_TRAPFRAME_T-core_id()*KSTKSIZE);
        }

        warn("You just got stack fucked!");
        unhandled_trap(state);
}

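/* These entry points appear to be hit when spilling/filling register windows
 * to/from the stack faults.  Each one rewrites the tt field (tbr bits 11:4)
 * to the underlying trap type (0x07, memory address not aligned, or 0x09,
 * data access exception) so the report names the real problem. */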
void
fill_misaligned(trapframe_t* state)
{
        state->tbr = (state->tbr & ~0xFFF) | 0x070;
        stack_fucked(state);
}

void
fill_pagefault(trapframe_t* state)
{
        state->tbr = (state->tbr & ~0xFFF) | 0x090;
        stack_fucked(state);
}

void
stack_misaligned(trapframe_t* state)
{
        state->tbr = (state->tbr & ~0xFFF) | 0x070;
        stack_fucked(state);
}

void
stack_pagefault(trapframe_t* state)
{
        state->tbr = (state->tbr & ~0xFFF) | 0x090;
        stack_fucked(state);
}

void
address_unaligned(trapframe_t* state)
{
        unhandled_trap(state);
}

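/* Page fault paths.  User faults get a shot at demand paging via
 * handle_page_fault(); kernel faults, or user faults it can't satisfy
 * (nonzero return), are fatal. */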
void
instruction_access_exception(trapframe_t* state)
{
        if(in_kernel(state) || handle_page_fault(current,state->pc,PROT_EXEC))
                unhandled_trap(state);
}

void
data_access_exception(trapframe_t* state)
{
        int prot = (state->fault_status & MMU_FSR_WR) ? PROT_WRITE : PROT_READ;

        if(in_kernel(state) || handle_page_fault(current,state->fault_addr,prot))
                unhandled_trap(state);
}

void
illegal_instruction(trapframe_t* state)
{
        unhandled_trap(state);
}

void
real_fp_exception(trapframe_t* state, ancillary_state_t* sillystate)
{
        unhandled_trap(state);
}

void
fp_exception(trapframe_t* state)
{
        ancillary_state_t sillystate;
        save_fp_state(&sillystate);

        // since our FP HW exception behavior is sketchy, reexecute
        // any faulting FP instruction in SW, which may call
        // real_fp_exception above
        emulate_fpu(state,&sillystate);

        restore_fp_state(&sillystate);
}

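/* Lazy FPU enabling: the first FP instruction a process executes traps here,
 * and setting PSR_EF lets FP instructions run once we return to it.  The
 * kernel itself should never touch the FPU. */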
void
fp_disabled(trapframe_t* state)
{
        if(in_kernel(state))
                panic("kernel executed an FP instruction!");

        state->psr |= PSR_EF;
}

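/* Syscall trap.  The convention here: g1 (gpr[1]) holds the syscall number,
 * the arguments arrive in o0-o4 (gpr[8]-gpr[12]), and the return value goes
 * back in o0. */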
void
handle_syscall(trapframe_t* state)
{
        uint32_t num = state->gpr[1];
        uint32_t a1 = state->gpr[8];
        uint32_t a2 = state->gpr[9];
        uint32_t a3 = state->gpr[10];
        uint32_t a4 = state->gpr[11];
        uint32_t a5 = state->gpr[12];

        advance_pc(state);

        /* Note we are not preemptively saving the TF in the env_tf.  We do maintain
         * a reference to it in current_tf (a per-cpu pointer).
         * In general, only save the tf and any silly state once you know it
         * is necessary (blocking).  And only save it in env_tf when you know you
         * are single core (PROC_RUNNING_S) */
        if (!in_kernel(state))
                set_current_tf(state);

        // syscall code wants an edible reference for current
        proc_incref(current, 1);
        state->gpr[8] = syscall(current,num,a1,a2,a3,a4,a5);
        proc_decref(current, 1);

        proc_restartcore(current,state);
}

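/* Forces every register window out to memory.  The first loop executes
 * NWINDOWS-1 saves (the branch is annulled on the last trip, so the final
 * save doesn't run), rotating through all windows and taking overflow traps
 * that spill them; the second loop restores back to where we started. */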
void
flush_windows()
{
        register int foo asm("g1");
        register int nwin asm("g2");
        extern int NWINDOWS;

        nwin = NWINDOWS;
        foo = nwin;

        asm volatile ("1: deccc %0; bne,a 1b; save %%sp,-64,%%sp"
                      : "=r"(foo) : "r"(foo));

        foo = nwin;
        asm volatile ("1: deccc %0; bne,a 1b; restore"
                      : "=r"(foo) : "r"(foo));
}

void
handle_flushw(trapframe_t* state)
{
        // don't actually need to do anything here.
        // trap_entry flushes user windows to the stack.
        advance_pc(state);
}

void
handle_breakpoint(trapframe_t* state)
{
        advance_pc(state);
        monitor(state);
}