Exp: per-core runqueues and timer ticks
[akaros.git] / kern / arch / sparc / smp.c
1 #include <smp.h>
2 #include <arch/arch.h>
3 #include <arch/smp.h>
4 #include <stdio.h>
5 #include <string.h>
6 #include <error.h>
7 #include <assert.h>
8 #include <atomic.h>
9
10 #ifdef __SHARC__
11 #pragma nosharc
12 #endif
13
14 #ifdef __DEPUTY__
15 #pragma nodeputy
16 #endif
17
18 void
19 smp_boot(void)
20 {
21         extern int time_for_smp_init;
22         num_cpus = 1;
23         printd("Cores, report in!\n");
24         time_for_smp_init = 1;
25
26         smp_percpu_init();
27
28         while(*(volatile uint32_t*)&num_cpus < num_cores());
29
30         printd("%d cores reporting!\n",num_cpus);
31 }
32
33 void
34 smp_init(void)
35 {
36         static spinlock_t report_in_lock = SPINLOCK_INITIALIZER;
37
38         smp_percpu_init();
39         spin_lock(&report_in_lock);
40         num_cpus++;
41         spin_unlock(&report_in_lock);
42
43         printd("Good morning, Vietnam! (core id = %d)\n",core_id());
44
45         smp_idle();
46 }
47
/* Static pool of wrappers for smp_call_* callers that want to wait.  A
 * wrapper is "allocated" by acquiring its embedded lock (smp_make_wrapper())
 * and freed by the unlock in smp_call_wait(). */
handler_wrapper_t
wrapper_pool[MAX_NUM_CPUS*8] = {{{0},SPINLOCK_INITIALIZER}};
50
51 handler_wrapper_t*
52 smp_make_wrapper()
53 {
54         int i;
55         for(i = 0; i < sizeof(wrapper_pool)/sizeof(wrapper_pool[0]); i++)
56                 if(spin_trylock(&wrapper_pool[i].lock) == 0)
57                         return &wrapper_pool[i];
58         return NULL;
59 }
60
61 void
62 smp_call_wrapper(trapframe_t* tf, uint32_t src, isr_t handler,
63                  handler_wrapper_t* wrapper,void* data)
64 {
65         if(wrapper)
66                 wrapper->wait_list[core_id()] = 0;
67         handler(tf,data);
68 }
69
70 int smp_call_function_self(isr_t handler, void* data,
71                            handler_wrapper_t** wait_wrapper)
72 {
73         return smp_call_function_single(core_id(),handler,data,wait_wrapper);
74 }
75
76 int smp_call_function_all(isr_t handler, void* data,
77                           handler_wrapper_t** wait_wrapper)
78 {
79         int8_t state = 0;
80         int i;
81         handler_wrapper_t* wrapper = 0;
82         if(wait_wrapper)
83         {
84                 wrapper = *wait_wrapper = smp_make_wrapper();
85                 if(!wrapper)
86                         return -ENOMEM;
87
88                 for(i = 0; i < num_cores(); i++)
89                         wrapper->wait_list[i] = 1;
90         }
91
92         enable_irqsave(&state);
93
94         // send to others
95         for(i = 0; i < num_cores(); i++)
96         {
97                 if(i == core_id())
98                         continue;
99
100                 send_kernel_message(i,(amr_t)smp_call_wrapper,
101                                           handler, wrapper, data, KMSG_IMMEDIATE);
102         }
103
104         // send to me
105         send_kernel_message(core_id(),(amr_t)smp_call_wrapper,
106                                   handler,wrapper,data, KMSG_IMMEDIATE);
107
108         cpu_relax(); // wait to get the interrupt
109
110         disable_irqsave(&state);
111
112         return 0;
113 }
114
115 int smp_call_function_single(uint32_t dest, isr_t handler, void* data,
116                              handler_wrapper_t** wait_wrapper)
117 {
118         int8_t state = 0;
119         handler_wrapper_t* wrapper = 0;
120         if(wait_wrapper)
121         {
122                 wrapper = *wait_wrapper = smp_make_wrapper();
123                 if(!wrapper)
124                         return -ENOMEM;
125                 wrapper->wait_list[dest] = 1;
126         }
127
128         enable_irqsave(&state);
129
130         send_kernel_message(dest,(amr_t)smp_call_wrapper,
131                                   handler,wrapper,data, KMSG_IMMEDIATE);
132
133         cpu_relax(); // wait to get the interrupt, if it's to this core
134
135         disable_irqsave(&state);
136
137         return 0;
138 }
139
140 int smp_call_wait(handler_wrapper_t* wrapper)
141 {
142         int i;
143         for(i = 0; i < num_cores(); i++)
144                 while(wrapper->wait_list[i]);
145
146         spin_unlock(&wrapper->lock);
147         return 0;
148 }
149
/* Perform any initialization needed by per_cpu_info.  Right now, this just
 * inits the amsg list (which sparc will probably also want).  Make sure every
 * core calls this at some point in the smp_boot process. */
void smp_percpu_init(void)
{
	uint32_t coreid = core_id();
	/* Per-core kernel-message queues: one for immediate messages and one
	 * for routine messages, each guarded by its own lock. */
	spinlock_init(&per_cpu_info[coreid].immed_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
	spinlock_init(&per_cpu_info[coreid].routine_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
#ifdef __CONFIG_EXPER_TRADPROC__
	/* Experimental traditional-process support: per-core runqueue plus a
	 * periodic timer tick on this core. */
	spinlock_init(&per_cpu_info[coreid].runqueue_lock);
	TAILQ_INIT(&per_cpu_info[coreid].runqueue);
	/* set a per-core periodic timer interrupt to go off every TIMER_uSEC usec*/
	set_timer(TIMER_uSEC);
#endif
}
166 }