Sparc's cpu_halt() enables interrupts
[akaros.git] / kern / arch / sparc / trap.c
#include <arch/arch.h>
#include <assert.h>
#include <arch/trap.h>
#include <string.h>
#include <process.h>
#include <syscall.h>
#include <monitor.h>
#include <manager.h>
#include <stdio.h>
#include <smp.h>
#include <slab.h>
#include <mm.h>
#include <umem.h>
#include <pmap.h>
#include <kdebug.h>

#ifdef __SHARC__
#pragma nosharc
#endif

#ifdef __DEPUTY__
#pragma nodeputy
#endif

/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract
 * sizeof(trapframe_t)).  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};

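/* Slab cache for kernel messages.  A message is allocated by the sender and,
 * per the note in send_kernel_message(), freed on the destination core. */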
struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];

/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
                             long arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
        k_msg->srcid = core_id();
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        /* if we're sending a routine message locally, we don't want/need an IPI */
        if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
                send_ipi(dst);
        return 0;
}

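/* SPARC traps save both %pc and %npc in the trapframe.  To step past the
 * trapping instruction while honoring the delayed control transfer semantics,
 * retire npc into pc and point npc at the next sequential instruction. */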
void
advance_pc(trapframe_t* state)
{
        state->pc = state->npc;
        state->npc += 4;
}

/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace */
void set_stack_top(uintptr_t stacktop)
{
        core_stacktops[core_id()] = stacktop;
}

/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
        uintptr_t sp, stacktop;
        stacktop = core_stacktops[core_id()];
        asm volatile("mov %%sp,%0" : "=r"(sp));
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
}

/* Starts running the current TF. */
void pop_kernel_tf(struct trapframe *tf)
{
        /* TODO! also do save_kernel_tf() in kern/arch/sparc/trap.h */
        panic("Not implemented.  =(");
}

/* Does nothing on sparc... */
void send_nmi(uint32_t os_coreid)
{
}

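/* idt_init() and sysenter_init() are x86 notions (the IDT and the SYSENTER
 * MSRs); on sparc they are no-ops that exist only to satisfy the
 * arch-independent init interface. */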
void
idt_init(void)
{
}

void
sysenter_init(void)
{
}

/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use it.
 * Eventually, we ought to do this in trap_entry.S.  Honestly, do whatever you
 * want with this.  The **tf is for convenience in x86. */
static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
{
        pcpui->actual_tf = **tf;
        pcpui->cur_tf = &pcpui->actual_tf;
        *tf = &pcpui->actual_tf;
}

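/* The gpr array follows the SPARC register file layout: gpr[0-7] are the
 * globals, gpr[8-15] the outs, gpr[16-23] the locals, and gpr[24-31] the ins,
 * which is how the g/o/l/i columns below index it. */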
static int
format_trapframe(trapframe_t *tf, char* buf, int bufsz)
{
        // Slightly hackish way to read out the instruction that faulted;
        // not guaranteed to be right 100% of the time.
        uint32_t insn;
        if (!current || memcpy_from_user(current, &insn, (void*)tf->pc, 4))
                insn = -1;

        int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
                           tf, core_id());

        for(int i = 0; i < 8; i++)
        {
                len += snprintf(buf+len,bufsz-len,
                                "  g%d   0x%08x  o%d   0x%08x"
                                "  l%d   0x%08x  i%d   0x%08x\n",
                                i,tf->gpr[i],i,tf->gpr[i+8],
                                i,tf->gpr[i+16],i,tf->gpr[i+24]);
        }

        len += snprintf(buf+len,bufsz-len,
                        "  psr  0x%08x  pc   0x%08x  npc  0x%08x  insn 0x%08x\n",
                        tf->psr,tf->pc,tf->npc,insn);
        len += snprintf(buf+len,bufsz-len,
                        "  y    0x%08x  fsr  0x%08x  far  0x%08x  tbr  0x%08x\n",
                        tf->y,tf->fault_status,tf->fault_addr,tf->tbr);
        len += snprintf(buf+len,bufsz-len,
                        "  timestamp  %21lld\n",tf->timestamp);

        return len;
}

void
print_trapframe(trapframe_t* tf)
{
        char buf[1024];
        int len = format_trapframe(tf,buf,sizeof(buf));
        cputbuf(buf,len);
}

#define TRAPNAME_MAX    32

static char*
get_trapname(uint8_t tt, char buf[TRAPNAME_MAX])
{
        static const char* trapnames[] = {
                [0x00] = "reset",
                [0x01] = "instruction access exception",
                [0x02] = "illegal instruction",
                [0x03] = "privileged instruction",
                [0x04] = "floating point disabled",
                [0x05] = "window overflow",
                [0x06] = "window underflow",
                [0x07] = "memory address not aligned",
                [0x08] = "floating point exception",
                [0x09] = "data access exception",
                [0x20] = "register access error",
                [0x21] = "instruction access error",
                [0x24] = "coprocessor disabled",
                [0x25] = "unimplemented FLUSH",
                [0x28] = "coprocessor exception",
                [0x29] = "data access error",
                [0x2A] = "division by zero",
                [0x2B] = "data store error",
                [0x2C] = "data MMU miss",
                [0x3C] = "instruction MMU miss"
        };

        if(tt >= 0x80)
                snprintf(buf,TRAPNAME_MAX,"user trap 0x%02x",tt);
        else if(tt >= 0x10 && tt < 0x20)
                snprintf(buf,TRAPNAME_MAX,"interrupt 0x%x",tt-0x10);
        else if(tt >= sizeof(trapnames)/sizeof(trapnames[0]) || !trapnames[tt])
                snprintf(buf,TRAPNAME_MAX,"(unknown trap 0x%02x)",tt);
        else
        {
                strncpy(buf,trapnames[tt],TRAPNAME_MAX);
                buf[TRAPNAME_MAX-1] = 0;
        }

        return buf;
}

/* Helper function.  Returns 0 if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
void handle_ipi(trapframe_t* tf)
{
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        if (!in_kernel(tf))
                set_current_tf(pcpui, &tf);
        else if((void*)tf->pc == &__cpu_halt) // break out of the __cpu_halt loop
                advance_pc(tf);

        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&pcpui->immed_amsgs, &pcpui->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&pcpui->routine_amsgs,
                                              &pcpui->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* make sure an IPI is pending if we have more work */
                        /* technically, we don't need to lock when checking */
                        if (!STAILQ_EMPTY(&pcpui->routine_amsgs))
                                send_ipi(core_id());
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors. */
void process_routine_kmsg(struct trapframe *tf)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        /* If we were told what our TF was, use that.  o/w, go with current_tf. */
        tf = tf ? tf : current_tf;
        while (1) {
                /* normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message. (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* make sure an IPI is pending if we have more work */
                if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
                        send_ipi(core_id());
                /* Execute the kernel message */
                assert(msg_cp.pc);
                msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
        }
}

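/* The trap type (tt) field occupies bits 4-11 of the TBR, hence the shift and
 * mask below. */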
void
unhandled_trap(trapframe_t* state)
{
        char buf[TRAPNAME_MAX];
        uint32_t trap_type = (state->tbr >> 4) & 0xFF;
        get_trapname(trap_type,buf);

        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);

        if(in_kernel(state))
        {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s",buf);
        }
        else
        {
                char tf_buf[1024];
                int tf_len = format_trapframe(state,tf_buf,sizeof(tf_buf));

                warn("Unhandled trap in user!\nTrap type: %s\n%s",buf,tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);

                assert(current);
                proc_incref(current, 1);
                proc_destroy(current);

                panic("I shouldn't have gotten here!");
        }
}

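/* Best-effort recovery when a trap hit while trap_entry was itself flushing
 * register windows (the PC is inside tflush1/tflush2): the trapframe we were
 * handed can't be trusted, so fall back to the trapframe at the top of this
 * core's boot stack. */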
static trapframe_t*
stack_fucked(trapframe_t* state)
{
        warn("You just got stack fucked!");
        extern char tflush1, tflush2;
        if(state->pc == (uint32_t)&tflush1 || state->pc == (uint32_t)&tflush2)
                return (trapframe_t*)(bootstacktop - core_id()*KSTKSIZE
                                                   - sizeof(trapframe_t));
        return state;
}

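/* Entry points for window spills/fills that fault.  Each rewrites the tt
 * field in the TBR (bits 4-11) to the matching generic trap, 0x07 "memory
 * address not aligned" or 0x09 "data access exception", then dispatches to
 * that handler. */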
void
fill_misaligned(trapframe_t* state)
{
        state = stack_fucked(state);
        state->tbr = (state->tbr & ~0xFFF) | 0x070;
        address_unaligned(state);
}

void
fill_pagefault(trapframe_t* state)
{
        state = stack_fucked(state);
        state->tbr = (state->tbr & ~0xFFF) | 0x090;
        data_access_exception(state);
}

void
spill_misaligned(trapframe_t* state)
{
        fill_misaligned(state);
}

void
spill_pagefault(trapframe_t* state)
{
        fill_pagefault(state);
}

void
address_unaligned(trapframe_t* state)
{
        unhandled_trap(state);
}

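/* Page fault paths.  Faults taken in the kernel, or user faults that
 * handle_page_fault() cannot resolve, end up in unhandled_trap(). */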
void
instruction_access_exception(trapframe_t* state)
{
        if(in_kernel(state) || handle_page_fault(current,state->pc,PROT_EXEC))
                unhandled_trap(state);
}

void
data_access_exception(trapframe_t* state)
{
        int prot = (state->fault_status & MMU_FSR_WR) ? PROT_WRITE : PROT_READ;

        if(in_kernel(state) || handle_page_fault(current,state->fault_addr,prot))
                unhandled_trap(state);
}

void
illegal_instruction(trapframe_t* state)
{
        unhandled_trap(state);
}

void
real_fp_exception(trapframe_t* state, ancillary_state_t* sillystate)
{
        unhandled_trap(state);
}

void
fp_exception(trapframe_t* state)
{
        ancillary_state_t sillystate;
        save_fp_state(&sillystate);

        // since our FP HW exception behavior is sketchy, reexecute
        // any faulting FP instruction in SW, which may call
        // real_fp_exception above
        emulate_fpu(state,&sillystate);

        restore_fp_state(&sillystate);
}

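/* Lazy FPU enable: set PSR.EF and return without advancing the PC, so the
 * faulting FP instruction re-executes with the FPU turned on. */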
void
fp_disabled(trapframe_t* state)
{
        if(in_kernel(state))
                panic("kernel executed an FP instruction!");

        state->psr |= PSR_EF;
}

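/* User trap: restart from a user-supplied trapframe, whose address arrives in
 * %o0 (gpr[8]).  The frame is copied in, sanitized by proc_secure_trapframe(),
 * installed as the current TF, and then restarted. */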
void
handle_pop_tf(trapframe_t* state)
{
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        set_current_tf(pcpui, &state);

        trapframe_t tf, *tf_p = &tf;
        if (memcpy_from_user(current,&tf,(void*)state->gpr[8],sizeof(tf))) {
                proc_incref(current, 1);
                proc_destroy(current);
                assert(0);
        }

        proc_secure_trapframe(&tf);
        set_current_tf(pcpui, &tf_p);
        proc_restartcore();
}

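/* User trap: copy the current trapframe out to the user buffer whose address
 * is in %o0 (gpr[8]).  A bad pointer gets the process destroyed. */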
void
handle_set_tf(trapframe_t* state)
{
        advance_pc(state);
        if (memcpy_to_user(current,(void*)state->gpr[8],state,sizeof(*state))) {
                proc_incref(current, 1);
                proc_destroy(current);
                assert(0);
        }
}

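/* Syscall trap.  The pointer to the syscall struct(s) arrives in %g1 (gpr[1])
 * and the count in %o0 (gpr[8]).  The PC is advanced up front so the trap
 * instruction isn't re-executed when the core restarts. */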
void
handle_syscall(trapframe_t* state)
{
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        uint32_t a0 = state->gpr[1];
        uint32_t a1 = state->gpr[8];

        advance_pc(state);
        enable_irq();

        set_current_tf(pcpui, &state);

        prep_syscalls(current, (struct syscall*)a0, a1);

        proc_restartcore();
}

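/* Force all register windows out to the stack by looping through saves, then
 * unwind with matching restores.  The save/restore sits in the annulled delay
 * slot of the branch, so it's skipped on loop exit. */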
void
flush_windows(void)
{
        register int foo asm("g1");
        register int nwin asm("g2");
        extern int NWINDOWS;

        nwin = NWINDOWS;
        foo = nwin;

        asm volatile ("1: deccc %0; bne,a 1b; save %%sp,-64,%%sp"
                      : "=r"(foo) : "r"(foo));

        foo = nwin;
        asm volatile ("1: deccc %0; bne,a 1b; restore"
                      : "=r"(foo) : "r"(foo));
}

void
handle_flushw(trapframe_t* state)
{
        // don't actually need to do anything here.
        // trap_entry flushes user windows to the stack.
        advance_pc(state);
}

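/* Breakpoint trap: step past the trap instruction and drop into the in-kernel
 * monitor. */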
void
handle_breakpoint(trapframe_t* state)
{
        advance_pc(state);
        monitor(state);
}