#include <kdebug.h>
#include <kmalloc.h>
-void reflect_unhandled_trap(unsigned int trap_nr, unsigned int err,
- unsigned long aux)
+static void print_unhandled_trap(struct proc *p, struct user_context *ctx,
+ unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
+{
+ struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
+ uint32_t vcoreid = pcpui->owning_vcoreid;
+ struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
+ static spinlock_t print_trap_lock = SPINLOCK_INITIALIZER;
+
+ spin_lock(&print_trap_lock);
+ if (!proc_is_vcctx_ready(p))
+ printk("Unhandled user trap from early SCP\n");
+ else if (vcpd->notif_disabled)
+ printk("Unhandled user trap in vcore context from VC %d\n", vcoreid);
+ print_user_ctx(ctx);
+ printk("err 0x%x (for PFs: User 4, Wr 2, Rd 1), aux %p\n", err, aux);
+ debug_addr_proc(p, get_user_ctx_pc(ctx));
+ print_vmrs(p);
+ backtrace_user_ctx(p, ctx);
+ spin_unlock(&print_trap_lock);
+}
+
+/* Traps that are considered normal operations, e.g. page faults on VMR-backed
+ * regions, which userspace is expected to handle. */
+static bool benign_trap(unsigned int err)
+{
+ return err & PF_VMR_BACKED;
+}
+
+static void printx_unhandled_trap(struct proc *p, struct user_context *ctx,
+ unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
+{
+ if (printx_on && !benign_trap(err))
+ print_unhandled_trap(p, ctx, trap_nr, err, aux);
+}
+
+/* Helper, reflects the current context back to the 2LS. Returns 0 on success,
+ * -1 on failure. */
+int reflect_current_context(void)
{
uint32_t coreid = core_id();
struct per_cpu_info *pcpui = &per_cpu_info[coreid];
struct proc *p = pcpui->cur_proc;
uint32_t vcoreid = pcpui->owning_vcoreid;
struct preempt_data *vcpd = &p->procdata->vcore_preempt_data[vcoreid];
- struct hw_trapframe *hw_tf = &pcpui->cur_ctx->tf.hw_tf;
+
+ assert(pcpui->cur_proc == pcpui->owning_proc);
+ if (!proc_is_vcctx_ready(p))
+ return -1;
+ if (vcpd->notif_disabled)
+ return -1;
+ /* the guts of a __notify */
+ vcpd->notif_disabled = TRUE;
+ copy_current_ctx_to(&vcpd->uthread_ctx);
+ memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
+ proc_init_ctx(pcpui->cur_ctx, vcoreid, vcpd->vcore_entry,
+ vcpd->vcore_stack, vcpd->vcore_tls_desc);
+ return 0;
+}
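+
+/* Callers of reflect_current_context() check the return value and kill the
+ * process if the context could not be reflected; reflect_unhandled_trap()
+ * below is the canonical example of that pattern. */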
+
+void reflect_unhandled_trap(unsigned int trap_nr, unsigned int err,
+ unsigned long aux)
+{
+ uint32_t coreid = core_id();
+ struct per_cpu_info *pcpui = &per_cpu_info[coreid];
+ struct proc *p = pcpui->cur_proc;
+
assert(p);
assert(pcpui->cur_ctx && (pcpui->cur_ctx->type == ROS_HW_CTX));
- if (!(p->procinfo->is_mcp)) {
- printk("Unhandled SCP trap\n");
- goto error_out;
- }
- if (vcpd->notif_disabled) {
- printk("Unhandled MCP trap in vcore context\n");
- goto error_out;
- }
/* need to store trap_nr, err code, and aux into the tf so that it can get
* extracted on the other end, and we need to flag the TF in some way so we
* can tell it was reflected. for example, on a PF, we need some number (14
* on x86), the prot violation (write, read, etc), and the virt addr (aux).
* parlib will know how to extract this info. */
- __arch_reflect_trap_hwtf(hw_tf, trap_nr, err, aux);
- /* the guts of a __notify */
- vcpd->notif_disabled = TRUE;
- vcpd->uthread_ctx = *pcpui->cur_ctx;
- memset(pcpui->cur_ctx, 0, sizeof(struct user_context));
- proc_init_ctx(pcpui->cur_ctx, vcoreid, p->env_entry,
- vcpd->transition_stack, vcpd->vcore_tls_desc);
- return;
-error_out:
- print_trapframe(hw_tf);
- enable_irq();
- printk("err 0x%x, aux %p\n", err, aux);
- debug_addr_proc(p, get_hwtf_pc(hw_tf));
- proc_destroy(p);
+ __arch_reflect_trap_hwtf(&pcpui->cur_ctx->tf.hw_tf, trap_nr, err, aux);
+ printx_unhandled_trap(p, pcpui->cur_ctx, trap_nr, err, aux);
+ if (reflect_current_context()) {
+ print_unhandled_trap(p, pcpui->cur_ctx, trap_nr, err, aux);
+ proc_destroy(p);
+ }
+}
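+
+/* Illustrative sketch of the userspace side: the 2LS's fault handler pulls the
+ * reflected values back out of the saved context and either services the fault
+ * (e.g. a page fault on a VMR-backed region) or aborts the uthread.  The
+ * helper names below are assumptions made for the example, not definitions
+ * provided by this change:
+ *
+ *	static void handle_refl_fault(struct uthread *uth, struct user_context *ctx)
+ *	{
+ *		unsigned int trap_nr = __arch_refl_get_nr(ctx);
+ *		unsigned int err = __arch_refl_get_err(ctx);
+ *		unsigned long aux = __arch_refl_get_aux(ctx);
+ *
+ *		if (trap_nr == 14 && (err & PF_VMR_BACKED))
+ *			handle_blocking_pf(uth, aux);
+ *		else
+ *			abort_uthread(uth);
+ *	}
+ */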
+
+/* Helper, copies the current context to to_ctx. */
+void copy_current_ctx_to(struct user_context *to_ctx)
+{
+ struct user_context *cur_ctx = current_ctx;
+
+ /* Be sure to finalize into cur_ctx, not to_ctx; otherwise the arch could
+ * get confused by other calls to finalize. */
+ arch_finalize_ctx(cur_ctx);
+ *to_ctx = *cur_ctx;
}
struct kmem_cache *kernel_msg_cache;
void kernel_msg_init(void)
{
kernel_msg_cache = kmem_cache_create("kernel_msgs",
- sizeof(struct kernel_message), ARCH_CL_SIZE, 0, 0, 0);
+ sizeof(struct kernel_message),
+ ARCH_CL_SIZE, 0, NULL, 0, 0, NULL);
}
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
* it's not running on behalf of a process, and we're actually spawning
* a kernel task. While we do have a syscall that does work in an RKM
* (change_to), it's not really the rest of the syscall context. */
- pcpui->cur_kthread->is_ktask = TRUE;
+ pcpui->cur_kthread->flags = KTH_KTASK_FLAGS;
pcpui_trace_kmsg(pcpui, (uintptr_t)msg_cp.pc);
msg_cp.pc(msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
- /* And if we make it back, be sure to unset this. If we never return,
- * but the kthread exits via some other way (smp_idle()), then
- * smp_idle() will deal with the flag. The default state is "off". For
- * an example of an RKM that does this, check out the
- * monitor->mon_bin_run. Finally, if the kthread gets swapped out of
- * pcpui, such as in __launch_kthread(), the next time the kthread is
- * reused, is_ktask will be reset. */
- pcpui->cur_kthread->is_ktask = FALSE;
+ /* And if we make it back, be sure to restore the default flags. If we
+ * never return, but the kthread exits via some other way (smp_idle()),
+ * then smp_idle() will deal with the flags. The default state includes
+ * 'not a ktask'. */
+ pcpui->cur_kthread->flags = KTH_DEFAULT_FLAGS;
/* If we aren't still in early RKM, it is because the KMSG blocked
* (thus leaving early RKM, finishing in default context) and then
* returned. This is a 'detached' RKM. Must idle in this scenario,
struct kernel_message *kmsg_i;
STAILQ_FOREACH(kmsg_i, list, link) {
fn_name = get_fn_name((long)kmsg_i->pc);
- printk("%s KMSG on %d from %d to run %p(%s)\n", type,
- kmsg_i->dstid, kmsg_i->srcid, kmsg_i->pc, fn_name);
+ printk("%s KMSG on %d from %d to run %p(%s)(%p, %p, %p)\n", type,
+ kmsg_i->dstid, kmsg_i->srcid, kmsg_i->pc, fn_name,
+ kmsg_i->arg0, kmsg_i->arg1, kmsg_i->arg2);
kfree(fn_name);
}
}
{
struct kernel_message *kmsg;
bool immed_emp, routine_emp;
- for (int i = 0; i < num_cpus; i++) {
+ for (int i = 0; i < num_cores; i++) {
spin_lock_irqsave(&per_cpu_info[i].immed_amsg_lock);
immed_emp = STAILQ_EMPTY(&per_cpu_info[i].immed_amsgs);
spin_unlock_irqsave(&per_cpu_info[i].immed_amsg_lock);
printk("\targ1: %p\n", kmsg->arg1);
printk("\targ2: %p\n", kmsg->arg2);
}
-
+
}
}
{
uint32_t coreid = core_id();
struct per_cpu_info *pcpui = &per_cpu_info[coreid];
-
+
if (!str)
str = "(none)";
printk("%s: Core %d, irq depth %d, ktrap depth %d, irqon %d\n", str, coreid,
void print_user_ctx(struct user_context *ctx)
{
- if (ctx->type == ROS_SW_CTX)
- print_swtrapframe(&ctx->tf.sw_tf);
- else if (ctx->type == ROS_HW_CTX)
+ switch (ctx->type) {
+ case ROS_HW_CTX:
print_trapframe(&ctx->tf.hw_tf);
- else
+ break;
+ case ROS_SW_CTX:
+ print_swtrapframe(&ctx->tf.sw_tf);
+ break;
+ case ROS_VM_CTX:
+ print_vmtrapframe(&ctx->tf.vm_tf);
+ break;
+ default:
printk("Bad TF %p type %d!\n", ctx, ctx->type);
+ }
}