/* Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details. */
#include <arch/arch.h>
#include <atomic.h>
#include <smp.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pmap.h>
#include <process.h>
#include <schedule.h>
#include <trap.h>
#include <trace.h>
#include <kdebug.h>
#include <core_set.h>
#include <completion.h>
#include <rcu.h>
struct all_cpu_work {
        struct completion comp;
        void (*func)(void *);
        void *opaque;
};
struct per_cpu_info per_cpu_info[MAX_NUM_CORES];

// tracks number of global waits on smp_calls, must be <= NUM_HANDLER_WRAPPERS
atomic_t outstanding_calls = 0;
/* Helper for running a proc (if we should).  Lots of repetition with
 * __smp_idle(). */
static void try_run_proc(void)
{
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

        /* There was a process running here, and we should return to it. */
        if (pcpui->owning_proc) {
                assert(!pcpui->cur_kthread->sysc);
                assert(pcpui->cur_ctx);
                __proc_startcore(pcpui->owning_proc, pcpui->cur_ctx);
                assert(0);      /* __proc_startcore() never returns */
        } else {
                /* Make sure we have abandoned core.  It's possible to have an
                 * owner without a current (smp_idle, __startcore, __death).
                 *
                 * If we had a current process, we might trigger __proc_free,
                 * which could send us a KMSG.  Since we're called after PRKM,
                 * let's just restart the idle loop. */
                if (abandon_core())
                        smp_idle();
        }
}
/* All cores end up calling this whenever there is nothing left to do or they
 * don't know explicitly what to do.  Non-zero cores call it when they are
 * done booting.  Other cases include after getting a DEATH IPI.
 *
 * All cores attempt to run the context of any owning proc.  Barring that,
 * they halt and wake up when interrupted, do any work on their work queue,
 * then halt again.  In between, the ksched gets a chance to tell them to do
 * something else, or perhaps to halt in another manner. */
static void __attribute__((noreturn)) __smp_idle(void *arg)
{
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

        pcpui->cur_kthread->flags = KTH_DEFAULT_FLAGS;
        while (1) {
                /* This might wake a kthread (the gp ktask), so be sure to run
                 * PRKM after reporting the quiescent state. */
                rcu_report_qs();
                /* If this runs an RKM, we'll call smp_idle from the top. */
                process_routine_kmsg();
                try_run_proc();
                cpu_bored();            /* call out to the ksched */
                /* cpu_halt() atomically turns on interrupts and halts the
                 * core.  Important to do this, since we could have an RKM
                 * come in via an interrupt right while PRKM is returning, and
                 * we wouldn't catch it.  When it returns, IRQs are back off.
                 */
                __set_cpu_state(pcpui, CPU_STATE_IDLE);
                cpu_halt();
                __set_cpu_state(pcpui, CPU_STATE_KERNEL);
        }
}

void smp_idle(void)
{
        disable_irq();
        __reset_stack_pointer(0, get_stack_top(), __smp_idle);
}
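/* A minimal sketch (not part of the original file) of how a core parks
 * itself: a one-shot callback, e.g. run as a ktask or from a routine KMSG,
 * can end with smp_idle().  smp_idle() is noreturn: it disables IRQs, resets
 * the stack, and enters __smp_idle's halt loop, so nothing after the call
 * runs.  The helper name is hypothetical. */
static void __attribute__((unused)) __example_park_core(void *arg)
{
        printk("Core %d finished its one-shot work; idling\n", core_id());
        smp_idle();     /* noreturn: fresh stack, then the idle loop */
}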
/* Arch-independent per-cpu initialization.  This will call the arch-dependent
 * init first. */
void smp_percpu_init(void)
{
        uint32_t coreid = core_id();
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];
        void *trace_buf;
        struct kthread *kthread;

        /* Don't initialize __ctx_depth here, since it is already 1 (at least
         * on x86), since this runs in irq context. */
        __arch_pcpu_init(coreid);
        /* init our kthread (tracks our currently running context) */
        kthread = __kthread_zalloc();
        /* assumes we're on the 1st page */
        kthread->stacktop = get_stack_top();
        pcpui->cur_kthread = kthread;
        /* Treat the startup threads as ktasks.  This will last until smp_idle
         * when they clear it, either in anticipation of being a user-backing
         * kthread or to handle an RKM. */
        kthread->flags = KTH_KTASK_FLAGS;
        per_cpu_info[coreid].spare = 0;
        /* Init relevant lists */
        spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
        spinlock_init_irqsave(&per_cpu_info[coreid].routine_amsg_lock);
        STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
        /* Init the per-core timer chain, used for alarms */
        init_timer_chain(&this_pcpui_var(tchain), set_pcpu_alarm_interrupt);
        /* Init generic tracing ring */
        trace_buf = kpage_alloc_addr();
        assert(trace_buf);
        trace_ring_init(&pcpui->traces, trace_buf, PGSIZE,
                        sizeof(struct pcpu_trace_event));
        for (int i = 0; i < NR_CPU_STATES; i++)
                pcpui->state_ticks[i] = 0;
        pcpui->last_tick_cnt = read_tsc();
        /* Core 0 is in the KERNEL state, called from smp_boot.  The other
         * cores are too, at least on x86, where we were called from asm
         * (woken by an IPI). */
        pcpui->cpu_state = CPU_STATE_KERNEL;
        /* Enable full lock debugging, after all pcpui work is done */
        pcpui->__lock_checking_enabled = 1;
}
/* It's actually okay to set the state to the existing state.  Originally that
 * was considered a bug in the state tracking, but it is possible, at least on
 * x86, for a halted core (state IDLE) to be woken by an IRQ that does not
 * trigger the IRQ-handling state: for example, the I_POKE_CORE IPI.  smp_idle
 * will just sleep again, resetting the state from IDLE to IDLE. */
void __set_cpu_state(struct per_cpu_info *pcpui, int state)
{
        uint64_t now_ticks;

        assert(!irq_is_enabled());
        /* TODO: could put in an option to enable/disable state tracking. */
        now_ticks = read_tsc();
        pcpui->state_ticks[pcpui->cpu_state] += now_ticks -
                                                pcpui->last_tick_cnt;
        /* TODO: if the state was user, we could account for the vcore's time,
         * similar to the total_ticks in struct vcore.  The difference is that
         * total_ticks tracks the vcore's virtual time, while this tracks user
         * time.  Something like vcore->user_ticks. */
        pcpui->cpu_state = state;
        pcpui->last_tick_cnt = now_ticks;
}
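/* A minimal sketch (not part of the original file) of how the accumulated
 * state ticks might be consumed.  It assumes a tsc2msec() TSC-to-msec
 * converter from the time headers; the helper name is hypothetical. */
static void __attribute__((unused)) __example_print_idle_time(int coreid)
{
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];

        /* state_ticks[] accumulates raw TSC ticks per CPU state */
        printk("Core %d idle for %lu msec\n", coreid,
               tsc2msec(pcpui->state_ticks[CPU_STATE_IDLE]));
}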
void reset_cpu_state_ticks(int coreid)
{
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];
        uint64_t now_ticks;

        if (coreid >= num_cores)
                return;
        /* Need to update last_tick_cnt so the current interval doesn't get
         * added in the next time we update. */
        now_ticks = read_tsc();
        pcpui->last_tick_cnt = now_ticks;
        for (int i = 0; i < NR_CPU_STATES; i++)
                pcpui->state_ticks[i] = 0;
}
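/* A minimal sketch (not part of the original file): reset the accounting on
 * every core, e.g. before a measurement run.  Uses only reset_cpu_state_ticks
 * above and the global num_cores; the helper name is hypothetical. */
static void __attribute__((unused)) __example_reset_all_state_ticks(void)
{
        for (int i = 0; i < num_cores; i++)
                reset_cpu_state_ticks(i);
}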
/* PCPUI Trace Rings: */
static void pcpui_trace_kmsg_handler(void *event, void *data)
{
        struct pcpu_trace_event *te = (struct pcpu_trace_event*)event;
        uintptr_t addr = te->arg1;

        printk("\tKMSG %p: %s\n", addr, get_fn_name(addr));
}
static void pcpui_trace_locks_handler(void *event, void *data)
{
        struct pcpu_trace_event *te = (struct pcpu_trace_event*)event;
        const char *func_name;
        uintptr_t lock_addr = te->arg1;

        if (lock_addr > KERN_LOAD_ADDR)
                func_name = get_fn_name(lock_addr);
        else
                func_name = "Dynamic lock";
        printk("Time %uus, lock %p (%s)\n", te->arg0, lock_addr, func_name);
        spinlock_debug((spinlock_t*)lock_addr);
}
/* Add specific trace handlers here: */
trace_handler_t pcpui_tr_handlers[PCPUI_NR_TYPES] = {
        0,      /* type 0 is reserved; it means "all types" to the dumper */
        pcpui_trace_kmsg_handler,
        pcpui_trace_locks_handler,
};
/* Generic handler for the pcpui ring.  Will switch out to the appropriate
 * type's handler. */
static void pcpui_trace_fn(void *event, void *data)
{
        struct pcpu_trace_event *te = (struct pcpu_trace_event*)event;
        int desired_type = (int)(long)data;

        if (te->type >= PCPUI_NR_TYPES) {
                printk("Bad trace type %d\n", te->type);
                return; /* don't index pcpui_tr_handlers out of bounds */
        }
        /* desired_type == 0 means all types */
        if (desired_type && desired_type != te->type)
                return;
        if (pcpui_tr_handlers[te->type])
                pcpui_tr_handlers[te->type](event, data);
}
void pcpui_tr_foreach(int coreid, int type)
{
        struct trace_ring *tr = &per_cpu_info[coreid].traces;

        printk("\n\nTrace Ring on Core %d\n--------------\n", coreid);
        trace_ring_foreach(tr, pcpui_trace_fn, (void*)(long)type);
}
void pcpui_tr_foreach_all(int type)
{
        for (int i = 0; i < num_cores; i++)
                pcpui_tr_foreach(i, type);
}
void pcpui_tr_reset_all(void)
{
        for (int i = 0; i < num_cores; i++)
                trace_ring_reset(&per_cpu_info[i].traces);
}
void pcpui_tr_reset_and_clear_all(void)
{
        for (int i = 0; i < num_cores; i++)
                trace_ring_reset_and_clear(&per_cpu_info[i].traces);
}
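/* A minimal debugging sketch (not part of the original file): dump only the
 * lock traces from every core, then reset and clear the rings.  It assumes
 * PCPUI_TR_TYPE_LOCKS is the lock-trace type enum (index 2 in
 * pcpui_tr_handlers); passing 0 instead would dump all types, per
 * pcpui_trace_fn above.  The helper name is hypothetical. */
static void __attribute__((unused)) __example_dump_lock_traces(void)
{
        pcpui_tr_foreach_all(PCPUI_TR_TYPE_LOCKS);
        pcpui_tr_reset_and_clear_all();
}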
static void smp_do_core_work(uint32_t srcid, long a0, long a1, long a2)
{
        struct all_cpu_work *acw = (struct all_cpu_work *) a0;

        acw->func(acw->opaque);
        completion_complete(&acw->comp, 1);
}
void smp_do_in_cores(const struct core_set *cset, void (*func)(void *),
                     void *opaque)
{
        int cpu = core_id();
        struct all_cpu_work acw;

        memset(&acw, 0, sizeof(acw));
        completion_init(&acw.comp, core_set_remote_count(cset));
        acw.func = func;
        acw.opaque = opaque;
        for (int i = 0; i < num_cores; i++) {
                if (core_set_getcpu(cset, i)) {
                        /* Run locally on our own core; KMSG the rest */
                        if (i == cpu)
                                func(opaque);
                        else
                                send_kernel_message(i, smp_do_core_work,
                                                    (long) &acw, 0, 0,
                                                    KMSG_ROUTINE);
                }
        }
        completion_wait(&acw.comp);
}
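/* A usage sketch for smp_do_in_cores() (not part of the original file): bump
 * a counter on all available cores and wait for completion.  It assumes
 * core_set_init() and core_set_fill_available() from core_set.h; the callback
 * and helper names are hypothetical. */
static void __example_bump(void *opaque)
{
        atomic_inc((atomic_t*)opaque);
}

static void __attribute__((unused)) __example_run_everywhere(void)
{
        struct core_set cset;
        atomic_t count = 0;

        core_set_init(&cset);
        core_set_fill_available(&cset);
        /* Runs __example_bump locally on this core and via RKM on the rest */
        smp_do_in_cores(&cset, __example_bump, &count);
        printk("Ran on %ld cores\n", atomic_read(&count));
}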