Load balancing for EXPER_TRADPROC
author    Barret Rhoden <brho@cs.berkeley.edu>
          Sat, 1 May 2010 04:23:00 +0000 (21:23 -0700)
committer Kevin Klues <klueska@cs.berkeley.edu>
          Thu, 3 Nov 2011 00:35:46 +0000 (17:35 -0700)
Each core runs the balancer about once every 100ms, for now.
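
For context on the cadence: each core's timer tick bumps a per-core counter, and the balancer is gated to every 10th tick, staggered by core id. A minimal sketch of that gate, assuming a ~10ms tick period (the real interval comes from set_core_timer(TIMER_uSEC) and is not shown in this diff; TICK_PERIOD_USEC and time_to_balance() are hypothetical names):

#define TICK_PERIOD_USEC      10000   /* assumed: ~10ms per core-local tick */
#define BALANCE_EVERY_N_TICKS 10      /* 10 ticks * ~10ms = ~100ms */

/* Hypothetical helper mirroring the gate added to timer_interrupt() below:
 * cores fire on different ticks so they don't all balance at once. */
static inline int time_to_balance(unsigned int ticks, unsigned int coreid)
{
	return ticks % BALANCE_EVERY_N_TICKS == coreid;
}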

kern/arch/i686/smp_boot.c
kern/arch/sparc/smp.c
kern/include/smp.h
kern/src/smp.c
kern/src/timer.c

diff --git a/kern/arch/i686/smp_boot.c b/kern/arch/i686/smp_boot.c
index 4a1dd73..92c9954 100644
@@ -319,6 +319,7 @@ void smp_percpu_init(void)
        spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
 #ifdef __CONFIG_EXPER_TRADPROC__
+       per_cpu_info[coreid].ticks = 0;
        spinlock_init(&per_cpu_info[coreid].runqueue_lock);
        TAILQ_INIT(&per_cpu_info[coreid].runqueue);
        /* set a per-core timer interrupt to go off and call local_schedule every
diff --git a/kern/arch/sparc/smp.c b/kern/arch/sparc/smp.c
index 8e95f9b..c9acbf6 100644
@@ -158,6 +158,7 @@ void smp_percpu_init(void)
        spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
 #ifdef __CONFIG_EXPER_TRADPROC__
+       per_cpu_info[coreid].ticks = 0;
        spinlock_init(&per_cpu_info[coreid].runqueue_lock);
        TAILQ_INIT(&per_cpu_info[coreid].runqueue);
        set_core_timer(TIMER_uSEC);
diff --git a/kern/include/smp.h b/kern/include/smp.h
index 59aec6e..a543a29 100644
@@ -40,6 +40,7 @@ struct per_cpu_info {
        spinlock_t routine_amsg_lock;
        struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) routine_amsgs;
 #ifdef __CONFIG_EXPER_TRADPROC__
+       unsigned int ticks; /* how many times the tick went off.  can roll over */
        spinlock_t runqueue_lock;
        struct proc_list runqueue;
 #endif /* __CONFIG_EXPER_TRADPROC__ */
@@ -70,6 +71,7 @@ int smp_call_wait(handler_wrapper_t*SAFE wrapper);
 
 void local_schedule(void);
 void local_schedule_proc(uint32_t core, struct proc *p);
+void load_balance(void);
 
 #endif /* __CONFIG_EXPER_TRADPROC__ */
 
diff --git a/kern/src/smp.c b/kern/src/smp.c
index c368467..537233d 100644
@@ -101,4 +101,20 @@ void local_schedule_proc(uint32_t core, struct proc *p)
        printd("SCHED: inserting proc %p on core %d\n", p, core);
        spin_unlock_irqsave(&my_info->runqueue_lock);
 }
+
+/* ghetto func to act like a load balancer.  for now, it just looks at the head
+ * of every other cpu's queue. */
+void load_balance(void)
+{
+       struct per_cpu_info *other_info;
+       struct proc *dummy;
+
+       for (int i = 0; i < num_cpus; i++) {
+               other_info = &per_cpu_info[i];
+               spin_lock_irqsave(&other_info->runqueue_lock);
+               dummy = TAILQ_FIRST(&other_info->runqueue);
+               spin_unlock_irqsave(&other_info->runqueue_lock);
+       }
+}
+
 #endif /* __CONFIG_EXPER_TRADPROC__ */
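
load_balance() above only peeks at each remote runqueue head and immediately drops the lock; nothing migrates yet. A later pull step might look roughly like the sketch below. This is not code from this commit: the steal policy, the skip-self check, and the TAILQ field name proc_link are illustrative assumptions; only local_schedule_proc(), per_cpu_info, and the locking pattern come from this file.

/* Hypothetical follow-on: pull the head of the first non-empty remote
 * runqueue onto this core.  A sketch only, not part of this commit. */
void load_balance_pull(void)
{
	uint32_t myid = core_id();
	struct per_cpu_info *other_info;
	struct proc *p = 0;

	for (int i = 0; i < num_cpus; i++) {
		if (i == myid)
			continue;	/* assumed policy: don't steal from ourselves */
		other_info = &per_cpu_info[i];
		spin_lock_irqsave(&other_info->runqueue_lock);
		p = TAILQ_FIRST(&other_info->runqueue);
		if (p)	/* 'proc_link' is an assumed field name in struct proc */
			TAILQ_REMOVE(&other_info->runqueue, p, proc_link);
		spin_unlock_irqsave(&other_info->runqueue_lock);
		if (p) {
			/* reuse the existing insert helper for our own queue */
			local_schedule_proc(myid, p);
			break;
		}
	}
}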
diff --git a/kern/src/timer.c b/kern/src/timer.c
index a87abda..1212ecd 100644
@@ -83,6 +83,10 @@ void train_timing()
 void timer_interrupt(struct trapframe *tf, void *data)
 {
 #ifdef __CONFIG_EXPER_TRADPROC__
+       /* About every 10 ticks (100ms), run the load balancer.  Offset by
+        * coreid so the cores don't all scan the runqueues on the same tick. */
+       if (per_cpu_info[core_id()].ticks % 10 == core_id())
+               load_balance();
        local_schedule();
 #endif /* __CONFIG_EXPER_TRADPROC__ */
 }
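
One note on the gate as written: ticks % 10 == core_id() can only hold for cores 0 through 9, so on a machine with more cores the higher-numbered ones never call load_balance() (the hunk also doesn't show the ticks increment, which presumably happens elsewhere in timer_interrupt()). A sketch of a gate that staggers every core at the same 10-tick cadence, offered as an assumption about the intent rather than a fix from this commit:

	/* stagger all cores, not just ids 0-9, at the same ~100ms cadence */
	if (per_cpu_info[core_id()].ticks % 10 == core_id() % 10)
		load_balance();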