Arch independent per-cpu initialization
authorBarret Rhoden <brho@cs.berkeley.edu>
Wed, 4 May 2011 23:16:07 +0000 (16:16 -0700)
committerKevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:36:02 +0000 (17:36 -0700)
Put the common (arch-independent) per-cpu initialization in kern/src/smp.c,
and the arch-dependent stuff in kern/arch/<arch>/ (i686 smp_boot.c, sparc smp.c).

kern/arch/i686/smp_boot.c
kern/arch/sparc/smp.c
kern/include/smp.h
kern/src/smp.c

index 864cf75..d40f9d6 100644 (file)
@@ -280,9 +280,8 @@ uint32_t smp_main(void)
  * must still call this for core 0.  This must NOT be called from smp_main,
  * since it relies on the kernel stack pointer to find the gdt.  Be careful not
  * to call it on too deep of a stack frame. */
-void smp_percpu_init(void)
+void __arch_pcpu_init(uint32_t coreid)
 {
-       uint32_t coreid = core_id();
        uintptr_t my_stack_bot;
 
        /* Flushes any potentially old mappings from smp_boot() (note the page table
@@ -306,14 +305,6 @@ void smp_percpu_init(void)
                per_cpu_info[coreid].gdt = (segdesc_t*)(*(uintptr_t*)my_stack_bot +
                                           sizeof(taskstate_t) + sizeof(pseudodesc_t));
        }
-       per_cpu_info[coreid].spare = 0;
-       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
-       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
-       
        /* need to init perfctr before potentiall using it in timer handler */
        perfmon_init();
-       /* Initialize the per-core timer chain */
-       init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
 }
index 267e86e..b00ac84 100644 (file)
@@ -150,14 +150,6 @@ int smp_call_wait(handler_wrapper_t* wrapper)
 /* Perform any initialization needed by per_cpu_info.  Right now, this just
  * inits the amsg list (which sparc will probably also want).  Make sure every
  * core calls this at some point in the smp_boot process. */
-void smp_percpu_init(void)
+void __arch_pcpu_init(uint32_t coreid)
 {
-       uint32_t coreid = core_id();
-       per_cpu_info[coreid].spare = 0;
-       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
-       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
-       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
-       /* Initialize the per-core timer chain */
-       init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
 }
index 076ebca..4f20048 100644 (file)
@@ -68,6 +68,7 @@ extern volatile uint32_t RO num_cpus;
 void smp_boot(void);
 void smp_idle(void) __attribute__((noreturn));
 void smp_percpu_init(void); // this must be called by each core individually
+void __arch_pcpu_init(uint32_t coreid);        /* each arch has one of these */
 
 /* SMP utility functions */
 int smp_call_function_self(poly_isr_t handler, TV(t) data,
index a2c0a38..dc4e86f 100644 (file)
@@ -78,3 +78,20 @@ void smp_idle(void)
        __smp_idle();
        assert(0);
 }
+
+/* Arch-independent per-cpu initialization.  This will call the arch dependent
+ * init first. */
+void smp_percpu_init(void)
+{
+       uint32_t coreid = core_id();
+       /* Do this first */
+       __arch_pcpu_init(coreid);
+       per_cpu_info[coreid].spare = 0;
+       /* Init relevant lists */
+       spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
+       spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
+       STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
+       /* Initialize the per-core timer chain */
+       init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
+}