Fixed DISABLE_SMT to report the right max_vcores
[akaros.git] / kern/arch/i686/smp_boot.c
index 0ce45e0..4a1dd73 100644
@@ -160,7 +160,11 @@ void smp_boot(void)
        // mapping pulled out from under them.  Now, if a core loses, it will spin
        // on the trampoline (which we must be careful to not deallocate)
        __spin_lock(get_smp_bootlock());
-       cprintf("Num_Cpus Detected: %d\n", num_cpus);
+       printk("Number of Cores Detected: %d\n", num_cpus);
+#ifdef __CONFIG_DISABLE_SMT__
+       assert(!(num_cpus % 2));
+       printk("Using only %d Idlecores (SMT Disabled)\n", num_cpus >> 1);
+#endif /* __CONFIG_DISABLE_SMT__ */
        smp_remap_coreids();
 
        // Remove the mapping of the page used by the trampoline
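For context, this hunk halves the advertised core count when SMT is compiled out. Below is a minimal sketch of the idea behind the fix, assuming a hypothetical helper name and that core 0 stays reserved for the kernel; it is an illustration, not the actual Akaros code.

#include <stdint.h>

/* Sketch only: report the vcore cap under __CONFIG_DISABLE_SMT__.  With
 * SMT disabled, only one hardware thread per physical core is handed out,
 * so half of num_cpus is usable; core 0 is assumed to stay with the
 * kernel.  The helper name is hypothetical. */
static inline uint32_t max_vcores_sketch(uint32_t num_cpus)
{
#ifdef __CONFIG_DISABLE_SMT__
	return (num_cpus >> 1) - 1;	/* one thread per core, minus core 0 */
#else
	return num_cpus - 1;		/* every core except core 0 */
#endif
}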
@@ -301,11 +305,25 @@ void smp_percpu_init(void)
 {
        uint32_t coreid = core_id();
 
+       /* Ensure the FPU units are initialized */
+       asm volatile ("fninit");
+
        /* core 0 sets up via the global gdt symbol */
        if (!coreid)
                per_cpu_info[0].gdt = gdt;
        else
                per_cpu_info[coreid].gdt = (segdesc_t*)(ROUNDUP(read_esp(), PGSIZE)
                                           - sizeof(segdesc_t)*SEG_COUNT);
-       STAILQ_INIT(&per_cpu_info[coreid].active_msgs);
+       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
+       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
+#ifdef __CONFIG_EXPER_TRADPROC__
+       spinlock_init(&per_cpu_info[coreid].runqueue_lock);
+       TAILQ_INIT(&per_cpu_info[coreid].runqueue);
+       /* set a per-core timer interrupt to go off and call local_schedule every
+        * TIMER_uSEC microseconds.  The handler is registered independently of
+        * EXPER_TRADPROC, in line with what sparc does. */
+       set_core_timer(TIMER_uSEC);
+#endif
 }
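The lists and locks initialized in smp_percpu_init() are the per-core queues for immediate and routine kernel messages. As a rough illustration of how they get used, here is a sketch of queuing a routine message onto another core; the kernel_message_t layout, the 'link' member, the kmalloc flags, and the send_ipi() signature and vector name are all assumptions for the example, not the kernel's actual send path.

/* Sketch only: append a routine kernel message to core 'dst' using the
 * per-core lock and STAILQ set up in smp_percpu_init().  Types, fields,
 * and the IPI vector below are assumed for illustration. */
static void send_routine_kmsg_sketch(uint32_t dst, void (*handler)(void *),
                                     void *arg)
{
	struct per_cpu_info *pcpui = &per_cpu_info[dst];
	kernel_message_t *msg = kmalloc(sizeof(kernel_message_t), 0);

	msg->pc = handler;		/* assumed field names */
	msg->arg = arg;
	spin_lock_irqsave(&pcpui->routine_amsg_lock);
	STAILQ_INSERT_TAIL(&pcpui->routine_amsgs, msg, link);
	spin_unlock_irqsave(&pcpui->routine_amsg_lock);
	send_ipi(dst, 0, I_KERNEL_MSG);	/* assumed signature and vector */
}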