akaros/kern/arch/riscv/trap.c
#include <arch/arch.h>
#include <arch/console.h>
#include <assert.h>
#include <manager.h>
#include <mm.h>
#include <monitor.h>
#include <pmap.h>
#include <process.h>
#include <slab.h>
#include <smp.h>
#include <stdio.h>
#include <string.h>
#include <syscall.h>
#include <trap.h>
#include <umem.h>

/* These are the stacks the kernel will load when it receives a trap from user
 * space.  The deal is that they get set right away in entry.S, and can always
 * be used for finding the top of the stack (from which you should subtract the
 * sizeof the trapframe).  Note, we need to have a junk value in the array so
 * that this is NOT part of the BSS.  If it is in the BSS, it will get 0'd in
 * kernel_init(), which is after these values get set.
 *
 * TODO: if these end up becoming contended cache lines, move this to
 * per_cpu_info. */
uintptr_t core_stacktops[MAX_NUM_CORES] = {0xcafebabe, 0};

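/* Step the saved PC past the trapping instruction.  Assumes fixed 4-byte
 * instructions (no compressed-ISA support). */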
void advance_pc(struct hw_trapframe *state)
{
        state->epc += 4;
}

/* Set stacktop for the current core to be the stack the kernel will start on
 * when trapping/interrupting from userspace */
void set_stack_top(uintptr_t stacktop)
{
        core_stacktops[core_id()] = stacktop;
}

/* Note the assertion assumes we are in the top page of the stack. */
uintptr_t get_stack_top(void)
{
        register uintptr_t sp asm("sp");
        uintptr_t stacktop = core_stacktops[core_id()];
        assert(ROUNDUP(sp, PGSIZE) == stacktop);
        return stacktop;
}

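/* Arch hook for trap-table setup.  There is no IDT to program on RISC-V, so
 * this is a no-op. */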
void idt_init(void)
{
}

/* Helper.  For now, this copies out the TF to pcpui, and sets cur_ctx to point
 * to it. */
static void set_current_ctx_hw(struct per_cpu_info *pcpui,
                               struct hw_trapframe *hw_tf)
{
        if (irq_is_enabled())
                warn("Turn off IRQs until cur_ctx is set!");
        assert(!pcpui->cur_ctx);
        pcpui->actual_ctx.type = ROS_HW_CTX;
        pcpui->actual_ctx.tf.hw_tf = *hw_tf;
        pcpui->cur_ctx = &pcpui->actual_ctx;
}

static void set_current_ctx_sw(struct per_cpu_info *pcpui,
                               struct sw_trapframe *sw_tf)
{
        if (irq_is_enabled())
                warn("Turn off IRQs until cur_ctx is set!");
        assert(!pcpui->cur_ctx);
        pcpui->actual_ctx.type = ROS_SW_CTX;
        pcpui->actual_ctx.tf.sw_tf = *sw_tf;
        pcpui->cur_ctx = &pcpui->actual_ctx;
}

static int format_trapframe(struct hw_trapframe *hw_tf, char *buf, int bufsz)
{
        /* Slightly hackish way to read out the instruction that faulted; not
         * guaranteed to be right 100% of the time. */
        uint32_t insn;
        if (!(current &&
              !memcpy_from_user(current, &insn, (void *)hw_tf->epc, 4)))
                insn = -1;

        int len = snprintf(buf, bufsz, "TRAP frame at %p on core %d\n", hw_tf,
                           core_id());
        static const char *regnames[] = {
            "z ", "ra", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8",
            "s9", "sA", "sB", "sp", "tp", "v0", "v1", "a0", "a1", "a2", "a3",
            "a4", "a5", "a6", "a7", "a8", "a9", "aA", "aB", "aC", "aD"};

        /* gpr[0] is the zero register; clear it in case the saved frame holds
         * junk there. */
        hw_tf->gpr[0] = 0;

        for (int i = 0; i < 32; i += 4) {
                for (int j = 0; j < 4; j++)
                        len += snprintf(buf + len, bufsz - len, "%s %016lx%c",
                                        regnames[i + j], hw_tf->gpr[i + j],
                                        j < 3 ? ' ' : '\n');
        }
        len += snprintf(buf + len, bufsz - len,
                        "sr %016lx pc %016lx va %016lx insn       %08x\n",
                        hw_tf->sr, hw_tf->epc, hw_tf->badvaddr, insn);

        buf[bufsz - 1] = 0;
        return len;
}

void print_trapframe(struct hw_trapframe *hw_tf)
{
        char buf[1024];
        int len = format_trapframe(hw_tf, buf, sizeof(buf));
        cputbuf(buf, len);
}

void print_swtrapframe(struct sw_trapframe *sw_tf)
{
#warning "fix me"
}

void print_vmtrapframe(struct vm_trapframe *vm_tf)
{
#warning "fix me"
}

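/* If the interrupt hit while this core was sitting in cpu_halt()'s wait loop,
 * redirect the return address to cpu_halt's caller (saved in ra) so we don't
 * go right back to sleeping when the trap returns. */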
static void exit_halt_loop(struct hw_trapframe *hw_tf)
{
        extern char after_cpu_halt;
        if ((char *)hw_tf->epc >= (char *)&cpu_halt &&
            (char *)hw_tf->epc < &after_cpu_halt)
                hw_tf->epc = hw_tf->gpr[GPR_RA];
}

static void handle_keypress(char c)
{
#warning "fix me"
        /* TODO: does cons_init need to be before cons_add_char?  Also, do
         * something with CTRL-G, Q, and B. */
        cons_add_char(c);

        cons_init();
}

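/* Drain the FROMHOST register; the high byte selects the source: 0x00 means
 * nothing pending, 0x01 is console input (the low byte holds the character). */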
static void handle_host_interrupt(struct hw_trapframe *hw_tf)
{
        uintptr_t fh = mtpcr(PCR_FROMHOST, 0);
        switch (fh >> 56) {
        case 0x00:
                return;
        case 0x01:
                handle_keypress(fh);
                return;
        default:
                assert(0);
        }
}

static void handle_timer_interrupt(struct hw_trapframe *hw_tf)
{
        timer_interrupt(hw_tf, NULL);
}

/* Assumes that any IPI you get is really a kernel message */
static void handle_interprocessor_interrupt(struct hw_trapframe *hw_tf)
{
        clear_ipi();
        handle_kmsg_ipi(hw_tf, 0);
}

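/* Last resort for traps nobody else wanted.  A kernel trap is fatal; a user
 * trap dumps the frame, warns, and kills the offending process.  The lock just
 * keeps concurrent cores from interleaving their output. */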
static void unhandled_trap(struct hw_trapframe *state, const char *name)
{
        static spinlock_t screwup_lock = SPINLOCK_INITIALIZER;
        spin_lock(&screwup_lock);

        if (in_kernel(state)) {
                print_trapframe(state);
                panic("Unhandled trap in kernel!\nTrap type: %s", name);
        } else {
                char tf_buf[1024];
                format_trapframe(state, tf_buf, sizeof(tf_buf));

                warn("Unhandled trap in user!\nTrap type: %s\n%s", name,
                     tf_buf);
                backtrace();
                spin_unlock(&screwup_lock);

                assert(current);
                proc_destroy(current);
        }
}

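/* Misaligned accesses are not emulated; they all funnel into unhandled_trap,
 * which panics in the kernel and kills the process in userspace. */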
static void handle_misaligned_fetch(struct hw_trapframe *state)
{
        unhandled_trap(state, "Misaligned Fetch");
}

static void handle_misaligned_load(struct hw_trapframe *state)
{
        unhandled_trap(state, "Misaligned Load");
}

static void handle_misaligned_store(struct hw_trapframe *state)
{
        unhandled_trap(state, "Misaligned Store");
}

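/* Page faults: a fault while in the kernel is a panic; a user fault is handed
 * to the VM system, and anything it can't satisfy ends up in unhandled_trap.
 * Fetch faults use epc as the faulting address, load/store faults use
 * badvaddr. */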
static void handle_fault_fetch(struct hw_trapframe *state)
{
        if (in_kernel(state)) {
                print_trapframe(state);
                panic("Instruction Page Fault in the Kernel at %p!",
                      state->epc);
        }

        set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
        if (handle_page_fault(current, state->epc, PROT_EXEC))
                unhandled_trap(state, "Instruction Page Fault");
}

static void handle_fault_load(struct hw_trapframe *state)
{
        if (in_kernel(state)) {
                print_trapframe(state);
                panic("Load Page Fault in the Kernel at %p!", state->badvaddr);
        }

        set_current_ctx_hw(&per_cpu_info[core_id()], state);

#warning "returns EAGAIN if you should reflect the fault"
        if (handle_page_fault(current, state->badvaddr, PROT_READ))
                unhandled_trap(state, "Load Page Fault");
}

static void handle_fault_store(struct hw_trapframe *state)
{
        if (in_kernel(state)) {
                print_trapframe(state);
                panic("Store Page Fault in the Kernel at %p!", state->badvaddr);
        }

        set_current_ctx_hw(&per_cpu_info[core_id()], state);

        if (handle_page_fault(current, state->badvaddr, PROT_WRITE))
                unhandled_trap(state, "Store Page Fault");
}

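/* Illegal instructions from userspace are first offered to the FPU emulator;
 * if emulation succeeds, skip the instruction and return, otherwise treat it
 * as an unhandled trap. */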
static void handle_illegal_instruction(struct hw_trapframe *state)
{
        assert(!in_kernel(state));

        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        set_current_ctx_hw(pcpui, state);
        if (emulate_fpu(state) == 0) {
                advance_pc(&pcpui->cur_ctx->tf.hw_tf);
                return;
        }

        unhandled_trap(state, "Illegal Instruction");
}

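/* Syscall trap: a0 holds the user's array of struct syscalls, a1 the count.
 * Advance the PC before saving the context so the syscall instruction isn't
 * re-executed when the process restarts. */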
static void handle_syscall(struct hw_trapframe *state)
{
        uintptr_t a0 = state->gpr[GPR_A0];
        uintptr_t a1 = state->gpr[GPR_A1];

        advance_pc(state);
        set_current_ctx_hw(&per_cpu_info[core_id()], state);
        enable_irq();
        prep_syscalls(current, (struct syscall *)a0, a1);
}

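/* Breakpoints drop into the in-kernel monitor and resume at the next
 * instruction. */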
static void handle_breakpoint(struct hw_trapframe *state)
{
        advance_pc(state);
        monitor(state);
}

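/* Top-level trap dispatch.  A negative cause value is an interrupt (the low
 * byte selects the IRQ handler); a nonnegative cause is a synchronous trap
 * dispatched through trap_handlers. */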
void handle_trap(struct hw_trapframe *hw_tf)
{
        static void (*const trap_handlers[])(struct hw_trapframe *) = {
            [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch,
            [CAUSE_FAULT_FETCH] = handle_fault_fetch,
            [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction,
            [CAUSE_PRIVILEGED_INSTRUCTION] = handle_illegal_instruction,
            [CAUSE_SYSCALL] = handle_syscall,
            [CAUSE_BREAKPOINT] = handle_breakpoint,
            [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load,
            [CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
            [CAUSE_FAULT_LOAD] = handle_fault_load,
            [CAUSE_FAULT_STORE] = handle_fault_store,
        };

        static void (*const irq_handlers[])(struct hw_trapframe *) = {
            [IRQ_TIMER] = handle_timer_interrupt,
            [IRQ_HOST] = handle_host_interrupt,
            [IRQ_IPI] = handle_interprocessor_interrupt,
        };

        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        if (hw_tf->cause < 0) {
                uint8_t irq = hw_tf->cause;
                assert(irq < sizeof(irq_handlers) / sizeof(irq_handlers[0]) &&
                       irq_handlers[irq]);

                if (in_kernel(hw_tf))
                        exit_halt_loop(hw_tf);
                else
                        set_current_ctx_hw(&per_cpu_info[core_id()], hw_tf);

                inc_irq_depth(pcpui);
                irq_handlers[irq](hw_tf);
                dec_irq_depth(pcpui);
        } else {
                assert(hw_tf->cause <
                           sizeof(trap_handlers) / sizeof(trap_handlers[0]) &&
                       trap_handlers[hw_tf->cause]);
                if (in_kernel(hw_tf)) {
                        inc_ktrap_depth(pcpui);
                        trap_handlers[hw_tf->cause](hw_tf);
                        dec_ktrap_depth(pcpui);
                } else {
                        trap_handlers[hw_tf->cause](hw_tf);
                }
#warning "if a trap wasn't handled fully, like an MCP pf, reflect it"
                /* reflect_unhandled_trap(hw_tf->tf_trapno, hw_tf->tf_err,
                 *                        aux); */
        }

        extern void pop_hw_tf(struct hw_trapframe *tf); /* in asm */
        /* Return to the current process, which should be runnable.  If we're
         * the kernel, we should just return naturally.  Note that current and
         * tf need to still be okay (might not be after blocking) */
        if (in_kernel(hw_tf))
                pop_hw_tf(hw_tf);
        else
                proc_restartcore();
}

/* We don't have NMIs now. */
void send_nmi(uint32_t os_coreid)
{
        printk("%s not implemented\n", __func__);
}

int register_irq(int irq, isr_t handler, void *irq_arg, uint32_t tbdf)
{
        printk("%s not implemented\n", __func__);
        return -1;
}

int route_irqs(int cpu_vec, int coreid)
{
        printk("%s not implemented\n", __func__);
        return -1;
}

void __arch_reflect_trap_hwtf(struct hw_trapframe *hw_tf, unsigned int trap_nr,
                              unsigned int err, unsigned long aux)
{
        printk("%s not implemented\n", __func__);
}