/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract
 * the sizeof the trapframe).  Note, we need to have a junk value in the array
 * so that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd
 * in kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CPUS] = {0xcafebabe, 0};
struct kmem_cache *kernel_msg_cache;

void kernel_msg_init(void)
{
	kernel_msg_cache = kmem_cache_create("kernel_msgs",
	                                     sizeof(struct kernel_message),
	                                     HW_CACHE_ALIGN, 0, 0, 0);
}
spinlock_t kernel_message_buf_busy[MAX_NUM_CPUS] = {SPINLOCK_INITIALIZER};
kernel_message_t kernel_message_buf[MAX_NUM_CPUS];
/* This is mostly identical to x86's, minus the different send_ipi call. */
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
                             long arg2, int type)
{
	kernel_message_t *k_msg;
	// note this will be freed on the destination core
	k_msg = (kernel_message_t *CT(1))TC(kmem_cache_alloc(kernel_msg_cache, 0));
	k_msg->srcid = core_id();
	k_msg->pc = pc;
	k_msg->arg0 = arg0;
	k_msg->arg1 = arg1;
	k_msg->arg2 = arg2;
	if (type == KMSG_IMMEDIATE) {
		spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
		STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
		spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
	} else if (type == KMSG_ROUTINE) {
		spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
		STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
		spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
	} else {
		panic("Unknown type of kernel message!");
	}
	/* if we're sending a routine message locally, we don't want/need an IPI */
	if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
		send_ipi(dst);
	return 0;
}
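/* On SPARC, %pc and %npc form a delayed-branch pair: npc holds the address of
 * the next instruction to execute, which is not always pc + 4.  To step a
 * trapframe past the instruction that trapped, load pc from npc and then bump
 * npc. */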
void
advance_pc(trapframe_t* state)
{
	state->pc = state->npc;
	state->npc += 4;
}
/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace */
void set_stack_top(uintptr_t stacktop)
{
	core_stacktops[core_id()] = stacktop;
}
/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
	uintptr_t sp, stacktop;

	stacktop = core_stacktops[core_id()];
	asm volatile("mov %%sp,%0" : "=r"(sp));
	assert(ROUNDUP(sp, PGSIZE) == stacktop);
	return stacktop;
}
/* Starts running the current TF. */
void pop_kernel_tf(struct trapframe *tf)
{
	/* TODO! also do save_kernel_tf() in kern/arch/sparc/trap.h */
	panic("Not implemented. =(");
}
/* Does nothing on sparc... */
void send_nmi(uint32_t os_coreid)
{
}
/* Helper.  For now, this copies out the TF to pcpui, and sets the tf to use
 * it.  Eventually, we ought to do this in trap_entry.S.  Honestly, do
 * whatever you want with this.  The **tf is for convenience in x86. */
static void set_current_tf(struct per_cpu_info *pcpui, struct trapframe **tf)
{
	pcpui->actual_tf = **tf;
	pcpui->cur_tf = &pcpui->actual_tf;
	*tf = &pcpui->actual_tf;
}
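/* The trapframe stores the integer register file as gpr[0..31]: indices 0-7
 * are the globals (%g), 8-15 the outs (%o), 16-23 the locals (%l), and 24-31
 * the ins (%i), which is the layout format_trapframe() prints below. */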
static int
format_trapframe(trapframe_t *tf, char* buf, int bufsz)
{
	// slightly hackish way to read out the instruction that faulted.
	// not guaranteed to be right 100% of the time
	uint32_t insn;
	if(!(current && !memcpy_from_user(current,&insn,(void*)tf->pc,4)))
		insn = -1;

	int len = snprintf(buf,bufsz,"TRAP frame at %p on core %d\n",
	                   tf, core_id());

	for(int i = 0; i < 8; i++)
	{
		len += snprintf(buf+len,bufsz-len,
		                "  g%d 0x%08x  o%d 0x%08x"
		                "  l%d 0x%08x  i%d 0x%08x\n",
		                i,tf->gpr[i],i,tf->gpr[i+8],
		                i,tf->gpr[i+16],i,tf->gpr[i+24]);
	}

	len += snprintf(buf+len,bufsz-len,
	                "  psr 0x%08x  pc 0x%08x  npc 0x%08x  insn 0x%08x\n",
	                tf->psr,tf->pc,tf->npc,insn);
	len += snprintf(buf+len,bufsz-len,
	                "  y 0x%08x  fsr 0x%08x  far 0x%08x  tbr 0x%08x\n",
	                tf->y,tf->fault_status,tf->fault_addr,tf->tbr);
	len += snprintf(buf+len,bufsz-len,
	                "  timestamp %21lld\n",tf->timestamp);

	return len;
}
void
print_trapframe(trapframe_t* tf)
{
	char buf[1024];
	int len = format_trapframe(tf,buf,sizeof(buf));
	cputbuf(buf,len);
}
#define TRAPNAME_MAX	32

static void
get_trapname(uint8_t tt, char buf[TRAPNAME_MAX])
{
	static const char* trapnames[] = {
		[0x01] "instruction access exception",
		[0x02] "illegal instruction",
		[0x03] "privileged instruction",
		[0x04] "floating point disabled",
		[0x05] "window overflow",
		[0x06] "window underflow",
		[0x07] "memory address not aligned",
		[0x08] "floating point exception",
		[0x09] "data access exception",
		[0x20] "register access error",
		[0x21] "instruction access error",
		[0x24] "coprocessor disabled",
		[0x25] "unimplemented FLUSH",
		[0x28] "coprocessor exception",
		[0x29] "data access error",
		[0x2A] "division by zero",
		[0x2B] "data store error",
		[0x2C] "data MMU miss",
		[0x3C] "instruction MMU miss"
	};

	if(tt >= 0x80)
		snprintf(buf,TRAPNAME_MAX,"user trap 0x%02x",tt);
	else if(tt >= 0x10 && tt < 0x20)
		snprintf(buf,TRAPNAME_MAX,"interrupt 0x%x",tt-0x10);
	else if(tt >= sizeof(trapnames)/sizeof(trapnames[0]) || !trapnames[tt])
		snprintf(buf,TRAPNAME_MAX,"(unknown trap 0x%02x)",tt);
	else
	{
		strncpy(buf,trapnames[tt],TRAPNAME_MAX);
		buf[TRAPNAME_MAX-1] = 0;
	}
}
/* Helper function.  Returns 0 if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
	kernel_message_t *k_msg;
	spin_lock_irqsave(list_lock);
	k_msg = STAILQ_FIRST(list_head);
	if (k_msg)
		STAILQ_REMOVE_HEAD(list_head, link);
	spin_unlock_irqsave(list_lock);
	return k_msg;
}
/* Mostly the same as x86's implementation.  Keep them in sync.  This assumes
 * you can send yourself an IPI, and that IPIs can get squashed like on x86. */
void handle_ipi(trapframe_t* tf)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	if (!in_kernel(tf))
		set_current_tf(pcpui, &tf);
	else if((void*)tf->pc == &__cpu_halt) // break out of the __cpu_halt loop
		advance_pc(tf);

	per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
	kernel_message_t msg_cp, *k_msg;

	while (1) { // will break out when there are no more messages
		/* Try to get an immediate message.  Exec and free it. */
		k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
		if (k_msg) {
			k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
			kmem_cache_free(kernel_msg_cache, (void*)k_msg);
		} else { // no immediate, might be a routine
			if (in_kernel(tf))
				return; // don't execute routine msgs if we were in the kernel
			k_msg = get_next_amsg(&myinfo->routine_amsgs,
			                      &myinfo->routine_amsg_lock);
			if (!k_msg) // no routines either
				return;
			/* copy in, and then free, in case we don't return */
			msg_cp = *k_msg;
			kmem_cache_free(kernel_msg_cache, (void*)k_msg);
			/* make sure an IPI is pending if we have more work */
			/* technically, we don't need to lock when checking */
			if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
				send_ipi(core_id());
			/* Execute the kernel message */
			msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
		}
	}
}
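/* Note the asymmetry above: immediate messages run no matter what context the
 * IPI interrupted, while routine messages are skipped if we arrived from
 * kernel code; those get picked up later via process_routine_kmsg(). */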
/* Same as in x86.  Might be diff in the future if there is no way to check for
 * immediate messages or there is the ability to selectively mask IPI vectors.*/
void process_routine_kmsg(struct trapframe *tf)
{
	per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
	kernel_message_t msg_cp, *k_msg;
	int8_t irq_state = 0;

	disable_irqsave(&irq_state);
	/* If we were told what our TF was, use that.  o/w, go with current_tf. */
	tf = tf ? tf : current_tf;
	while (1) {
		/* normally, we want ints disabled, so we don't have an empty self-ipi
		 * for every routine message. (imagine a long list of routines).  But
		 * we do want immediates to run ahead of routines.  This enabling
		 * should work (might not in some shitty VMs).  Also note we can
		 * receive an extra self-ipi for routine messages before we turn off
		 * irqs again.  Not a big deal, since we will process it right away. */
		if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
			enable_irq();
			cpu_relax();
			disable_irq();
		}
		k_msg = get_next_amsg(&myinfo->routine_amsgs,
		                      &myinfo->routine_amsg_lock);
		if (!k_msg) {
			enable_irqsave(&irq_state);
			return;
		}
		/* copy in, and then free, in case we don't return */
		msg_cp = *k_msg;
		kmem_cache_free(kernel_msg_cache, (void*)k_msg);
		/* make sure an IPI is pending if we have more work */
		if (!STAILQ_EMPTY(&myinfo->routine_amsgs))
			send_ipi(core_id());
		/* Execute the kernel message */
		msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
	}
}
static void
unhandled_trap(trapframe_t* state)
{
	char buf[TRAPNAME_MAX];
	uint32_t trap_type = (state->tbr >> 4) & 0xFF;
	get_trapname(trap_type,buf);

	static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
	spin_lock(&screwup_lock);

	if(in_kernel(state))
	{
		print_trapframe(state);
		panic("Unhandled trap in kernel!\nTrap type: %s",buf);
	}
	else
	{
		char tf_buf[1024];
		int tf_len = format_trapframe(state,tf_buf,sizeof(tf_buf));
		warn("Unhandled trap in user!\nTrap type: %s\n%s",buf,tf_buf);
		spin_unlock(&screwup_lock);
		proc_incref(current, 1);
		proc_destroy(current);
		panic("I shouldn't have gotten here!");
	}
}
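/* When a window spill/fill fault hits while trap_entry.S itself is flushing
 * register windows (pc at the tflush1/tflush2 labels), the trapframe on the
 * normal stack can't be trusted, so the helper below hands back a trapframe
 * carved out of this core's slice of the boot stack instead. */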
static trapframe_t*
stack_fucked(trapframe_t* state)
{
	warn("You just got stack fucked!");
	extern char tflush1, tflush2;
	if(state->pc == (uint32_t)&tflush1 || state->pc == (uint32_t)&tflush2)
		return (trapframe_t*)(bootstacktop - core_id()*KSTKSIZE
		                                   - sizeof(trapframe_t));
	return state;
}
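/* The SPARC v8 TBR holds the trap base address in bits 31:12 and the trap
 * type (tt) in bits 11:4, with the low four bits zero.  The fixups below
 * rewrite just the tt field before re-dispatching: 0x07 is "memory address
 * not aligned" and 0x09 is "data access exception" (see trapnames[] above). */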
void
fill_misaligned(trapframe_t* state)
{
	state = stack_fucked(state);
	state->tbr = (state->tbr & ~0xFFF) | 0x070;
	address_unaligned(state);
}
void
fill_pagefault(trapframe_t* state)
{
	state = stack_fucked(state);
	state->tbr = (state->tbr & ~0xFFF) | 0x090;
	data_access_exception(state);
}
void
spill_misaligned(trapframe_t* state)
{
	fill_misaligned(state);
}

void
spill_pagefault(trapframe_t* state)
{
	fill_pagefault(state);
}
void
address_unaligned(trapframe_t* state)
{
	unhandled_trap(state);
}
void
instruction_access_exception(trapframe_t* state)
{
	if(in_kernel(state) || handle_page_fault(current,state->pc,PROT_EXEC))
		unhandled_trap(state);
}
void
data_access_exception(trapframe_t* state)
{
	int prot = (state->fault_status & MMU_FSR_WR) ? PROT_WRITE : PROT_READ;

	if(in_kernel(state) || handle_page_fault(current,state->fault_addr,prot))
		unhandled_trap(state);
}
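/* handle_page_fault() returns 0 when it resolves the fault; kernel-mode
 * faults and unresolved user faults both fall through to unhandled_trap(). */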
void
illegal_instruction(trapframe_t* state)
{
	unhandled_trap(state);
}
void
real_fp_exception(trapframe_t* state, ancillary_state_t* sillystate)
{
	unhandled_trap(state);
}
void
fp_exception(trapframe_t* state)
{
	ancillary_state_t sillystate;
	save_fp_state(&sillystate);

	// since our FP HW exception behavior is sketchy, reexecute
	// any faulting FP instruction in SW, which may call
	// real_fp_exception above
	emulate_fpu(state,&sillystate);

	restore_fp_state(&sillystate);
}
void
fp_disabled(trapframe_t* state)
{
	if(in_kernel(state))
		panic("kernel executed an FP instruction!");

	state->psr |= PSR_EF;
}
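/* For the trapframe get/set handlers below, gpr[8] is %o0 (the first "out"
 * register, per the gpr[] layout noted above), i.e. the user-supplied pointer
 * to a struct trapframe. */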
void
handle_pop_tf(trapframe_t* state)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	set_current_tf(pcpui, &state);

	trapframe_t tf, *tf_p = &tf;
	if (memcpy_from_user(current,&tf,(void*)state->gpr[8],sizeof(tf))) {
		proc_incref(current, 1);
		proc_destroy(current);
	}

	proc_secure_trapframe(&tf);
	set_current_tf(pcpui, &tf_p);
	proc_restartcore();
}
void
handle_set_tf(trapframe_t* state)
{
	if (memcpy_to_user(current,(void*)state->gpr[8],state,sizeof(*state))) {
		proc_incref(current, 1);
		proc_destroy(current);
	}
}
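/* Syscall trap ABI: %g1 (gpr[1]) carries the user pointer to the syscall
 * struct array and %o0 (gpr[8]) the number of entries; both are handed to
 * prep_syscalls() below. */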
void
handle_syscall(trapframe_t* state)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
	uint32_t a0 = state->gpr[1];
	uint32_t a1 = state->gpr[8];

	advance_pc(state);
	struct per_cpu_info* coreinfo = &per_cpu_info[core_id()];
	set_current_tf(pcpui, &state);
	prep_syscalls(current, (struct syscall*)a0, a1);
	proc_restartcore();
}
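/* The two asm loops below walk the SPARC register-window ring: "deccc"
 * decrements the counter in %g1 and the annulling "bne,a" executes the save
 * (or restore) in its delay slot only while the counter is nonzero, so every
 * window gets forced out to (and later pulled back from) the stack via the
 * normal overflow/underflow traps.  %g2 (nwin) presumably holds the window
 * count used to seed the counter. */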
	register int foo asm("g1");
	register int nwin asm("g2");

	asm volatile ("1: deccc %0; bne,a 1b; save %%sp,-64,%%sp"
	              : "=r"(foo) : "r"(foo));

	asm volatile ("1: deccc %0; bne,a 1b; restore"
	              : "=r"(foo) : "r"(foo));
void
handle_flushw(trapframe_t* state)
{
	// don't actually need to do anything here.
	// trap_entry flushes user windows to the stack.
	advance_pc(state);
}
void
handle_breakpoint(trapframe_t* state)