Arch-independent per-cpu initialization
diff --git a/kern/arch/sparc/smp.c b/kern/arch/sparc/smp.c
index 8b7f949..b00ac84 100644
--- a/kern/arch/sparc/smp.c
+++ b/kern/arch/sparc/smp.c
@@ -23,6 +23,8 @@ smp_boot(void)
        printd("Cores, report in!\n");
        time_for_smp_init = 1;
 
+       smp_percpu_init();
+
        while(*(volatile uint32_t*)&num_cpus < num_cores());
 
        printd("%d cores reporting!\n",num_cpus);
@@ -33,6 +35,7 @@ smp_init(void)
 {
        static spinlock_t report_in_lock = SPINLOCK_INITIALIZER;
 
+       smp_percpu_init();
        spin_lock(&report_in_lock);
        num_cpus++;
        spin_unlock(&report_in_lock);
@@ -95,12 +98,12 @@ int smp_call_function_all(isr_t handler, void* data,
                        continue;
 
                send_kernel_message(i,(amr_t)smp_call_wrapper,
-                                         handler, wrapper, data, AMSG_IMMEDIATE);
+                                         handler, wrapper, data, KMSG_IMMEDIATE);
        }
 
        // send to me
        send_kernel_message(core_id(),(amr_t)smp_call_wrapper,
-                                 handler,wrapper,data, AMSG_IMMEDIATE);
+                                 handler,wrapper,data, KMSG_IMMEDIATE);
 
        cpu_relax(); // wait to get the interrupt
 
@@ -125,7 +128,7 @@ int smp_call_function_single(uint32_t dest, isr_t handler, void* data,
        enable_irqsave(&state);
 
        send_kernel_message(dest,(amr_t)smp_call_wrapper,
-                                 handler,wrapper,data, AMSG_IMMEDIATE);
+                                 handler,wrapper,data, KMSG_IMMEDIATE);
 
        cpu_relax(); // wait to get the interrupt, if it's to this core
 
@@ -147,11 +150,6 @@ int smp_call_wait(handler_wrapper_t* wrapper)
 /* Perform any initialization needed by per_cpu_info.  Right now, this just
  * inits the amsg list (which sparc will probably also want).  Make sure every
  * core calls this at some point in the smp_boot process. */
-void smp_percpu_init(void)
+void __arch_pcpu_init(uint32_t coreid)
 {
-       uint32_t coreid = core_id();
-       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
-       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
 }
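
The queue setup deleted above presumably moves into an arch-independent
smp_percpu_init() (likely in kern/src/smp.c, which is not part of this file's
diff), leaving __arch_pcpu_init() as a per-arch hook that sparc currently
leaves empty.  A minimal sketch of that generic side, reconstructed from the
removed lines and therefore an assumption about the rest of the commit:

/* Assumed arch-independent helper: does the kmsg queue setup that used to
 * live in the sparc smp_percpu_init(), then calls the new arch hook. */
void smp_percpu_init(void)
{
	uint32_t coreid = core_id();

	spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
	spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
	__arch_pcpu_init(coreid);	/* arch hook; a no-op on sparc for now */
}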