Arch-independent per-cpu initialization
diff --git a/kern/src/smp.c b/kern/src/smp.c
index 537233d..dc4e86f 100644
--- a/kern/src/smp.c
+++ b/kern/src/smp.c
@@ -31,90 +31,67 @@ atomic_t outstanding_calls = 0;
  * - Management cores (core 0 for now) call manager, which should never return.
  * - Worker cores halt and wake up when interrupted, do any work on their work
  *   queue, then halt again.
- *
- * TODO: think about resetting the stack pointer at the beginning for worker
- * cores. (keeps the stack from growing if we never go back to userspace).
  * TODO: think about unifying the manager into a workqueue function, so we don't
  * need to check mgmt_core in here.  it gets a little ugly, since there are
  * other places where we check for mgmt and might not smp_idle / call manager.
  */
-void smp_idle(void)
+static void __smp_idle(void)
 {
        int8_t state = 0;
-       per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
+       struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
 
+       /* There was a process running here, and we should return to it */
+       if (pcpui->cur_tf) {                    /* aka, current_tf */
+               assert(pcpui->cur_proc);        /* aka, current */
+               proc_restartcore();
+               assert(0);
+       }
+       /* if we made it here, we truly want to idle */
+       /* in the future, we may need to proactively leave process context here.
+        * for now, it is possible to have a current loaded, even if we are idle
+        * (and presumably about to execute a kmsg or fire up a vcore). */
        if (!management_core()) {
                enable_irq();
                while (1) {
-                       process_routine_kmsg();
+                       process_routine_kmsg(0);
                        cpu_halt();
                }
        } else {
-               /* techincally, this check is arch dependent.  i want to know if it
-                * happens.  the enabling/disabling could be interesting. */
                enable_irqsave(&state);
-               if (!STAILQ_EMPTY(&myinfo->immed_amsgs) ||
-                       !STAILQ_EMPTY(&myinfo->routine_amsgs)) 
-                       printk("[kernel] kmsgs in smp_idle() on a management core.\n");
-               process_routine_kmsg();
+               /* This would make us wait to enter the manager until any IO is done
+                * (a totally arbitrary 10ms), so we can handle the routine message we
+                * currently use to do the completion.  Note it also causes us to wait
+                * 10ms regardless of how long the IO takes.  This all needs work. */
+               //udelay(10000); /* done in the manager for now */
+               process_routine_kmsg(0);
                disable_irqsave(&state);
                manager();
        }
        assert(0);
 }
 
-#ifdef __CONFIG_EXPER_TRADPROC__
-/* For experiments with per-core schedulers (traditional).  This checks the
- * runqueue, and if there is something there, it runs in.  Note this does
- * nothing for whoever was running here.  Consider saving and restoring them,
- * resetting current, etc. */
-void local_schedule(void)
-{
-       struct per_cpu_info *my_info = &per_cpu_info[core_id()];
-       struct proc *next_to_run;
-
-       spin_lock_irqsave(&my_info->runqueue_lock);
-       next_to_run = TAILQ_FIRST(&my_info->runqueue);
-       if (next_to_run)
-               TAILQ_REMOVE(&my_info->runqueue, next_to_run, proc_link);
-       spin_unlock_irqsave(&my_info->runqueue_lock);
-       if (!next_to_run)
-               return;
-       assert(next_to_run->state == PROC_RUNNING_M); // FILTHY HACK
-       printd("Core %d trying to run proc %08p\n", core_id(), next_to_run);
-       void proc_run_hand(struct trapframe *tf, uint32_t src_id, void *p, void *a1,
-                          void *a2)
-       {
-               proc_run((struct proc*)p);
-       }
-       send_kernel_message(core_id(), proc_run_hand, (void*)next_to_run, 0, 0,
-                           KMSG_ROUTINE);
-       return;
-}
-
-void local_schedule_proc(uint32_t core, struct proc *p)
+void smp_idle(void)
 {
-       assert(core); // for sanity don't put them on core0 or any management core
-       struct per_cpu_info *my_info = &per_cpu_info[core];
-       spin_lock_irqsave(&my_info->runqueue_lock);
-       TAILQ_INSERT_TAIL(&my_info->runqueue, p, proc_link);
-       printd("SCHED: inserting proc %p on core %d\n", p, core);
-       spin_unlock_irqsave(&my_info->runqueue_lock);
+       #ifdef __CONFIG_RESET_STACKS__
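+       /* Start over at the top of this core's stack; since __smp_idle() never
+        * returns, this keeps the stack from growing while we idle. */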
+       set_stack_pointer(get_stack_top());
+       #endif /* __CONFIG_RESET_STACKS__ */
+       __smp_idle();
+       assert(0);
 }
 
-/* ghetto func to act like a load balancer.  for now, it just looks at the head
- * of every other cpu's queue. */
-void load_balance(void)
+/* Arch-independent per-cpu initialization.  This will call the arch-dependent
+ * init first. */
+void smp_percpu_init(void)
 {
-       struct per_cpu_info *other_info;
-       struct proc *dummy;
-
-       for (int i = 0; i < num_cpus; i++) {
-               other_info = &per_cpu_info[i];
-               spin_lock_irqsave(&other_info->runqueue_lock);
-               dummy = TAILQ_FIRST(&other_info->runqueue);
-               spin_unlock_irqsave(&other_info->runqueue_lock);
-       }
+       uint32_t coreid = core_id();
+       /* Do this first */
+       __arch_pcpu_init(coreid);
+       per_cpu_info[coreid].spare = 0;
+       /* Init relevant lists */
+       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
+       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
+       /* Initialize the per-core timer chain */
+       init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
 }
-
-#endif /* __CONFIG_EXPER_TRADPROC__ */
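
How the pieces fit together at boot: each core is expected to call smp_percpu_init() before touching its per-cpu state, then sit in smp_idle().  A rough sketch of that per-core bring-up follows, assuming an entry function of roughly this shape; smp_core_entry() is a hypothetical name, not part of this commit.

	void smp_core_entry(void)
	{
		/* Arch-dependent init (__arch_pcpu_init) runs first, then the
		 * generic per-cpu state: kmsg lists, locks, and the per-core
		 * timer chain. */
		smp_percpu_init();
		/* Resets the stack top under __CONFIG_RESET_STACKS__, then
		 * idles or runs the manager.  Never returns. */
		smp_idle();
		assert(0);
	}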
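
A minimal sketch of handing work to an idling worker core, using the handler and send_kernel_message() signatures visible in the removed local_schedule() code above.  The target core wakes from cpu_halt(), its __smp_idle() loop calls process_routine_kmsg(), and the handler runs on that core; hello_hand() and greet_core() are illustrative names only.

	static void hello_hand(struct trapframe *tf, uint32_t src_id, void *a0,
	                       void *a1, void *a2)
	{
		printk("[kernel] core %d: routine kmsg from core %d\n",
		       core_id(), src_id);
	}

	void greet_core(uint32_t target)
	{
		/* Queued on the target's routine list; handled in its idle loop. */
		send_kernel_message(target, hello_hand, 0, 0, 0, KMSG_ROUTINE);
	}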