Rename vcore_request() -> vcore_request_more()
[akaros.git] / user / parlib / include / parlib / vcore.h
#pragma once

#include <parlib/arch/vcore.h>
#include <parlib/arch/atomic.h>
#include <sys/param.h>
#include <string.h>
#include <parlib/timing.h>
#include <parlib/common.h>

__BEGIN_DECLS

/*****************************************************************************/
/* TODO: This is a complete hack, but necessary for vcore stuff to work for
 * now.  The issue is that exit sometimes calls sys_yield(), and we can't
 * recover from that properly under our vcore model (though we shouldn't have
 * to).  We really need to rethink what sys_yield 'should' do when in
 * multicore mode, or else come up with a different syscall entirely. */
#undef exit
extern void _exit (int status);
extern void exit (int __status) __THROW __attribute__ ((__noreturn__));
#define exit(status) _exit(status)
/*****************************************************************************/

#define TRANSITION_STACK_PAGES 2
#define TRANSITION_STACK_SIZE (TRANSITION_STACK_PAGES*PGSIZE)

/* Defined in vcore.c */
void __attribute__((noreturn)) vcore_entry(void);
extern __thread bool __vcore_context;
extern __thread int __vcoreid;
extern __thread struct syscall __vcore_one_sysc;        /* see sys_change_vcore */

/* Arch specific entry from the kernel */
void __attribute__((noreturn)) __kernel_vcore_entry(void);

/* Vcore API functions */
static inline uint32_t max_vcores(void);
static inline uint32_t num_vcores(void);
static inline int vcore_id(void);
static inline bool in_vcore_context(void);
static inline bool in_multi_mode(void);
static inline void __enable_notifs(uint32_t vcoreid);
static inline void __disable_notifs(uint32_t vcoreid);
static inline bool notif_is_enabled(uint32_t vcoreid);
static inline bool vcore_is_mapped(uint32_t vcoreid);
static inline bool vcore_is_preempted(uint32_t vcoreid);
static inline struct preempt_data *vcpd_of(uint32_t vcoreid);
static inline bool preempt_is_pending(uint32_t vcoreid);
static inline bool __preempt_is_pending(uint32_t vcoreid);
static inline void *get_vcpd_tls_desc(uint32_t vcoreid);
static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc);
static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid);
static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid);
void vcore_lib_init(void);
void vcore_change_to_m(void);
int vcore_request_more(long nr_new_vcores);
void vcore_yield(bool preempt_pending);
void vcore_reenter(void (*entry_func)(void));
void enable_notifs(uint32_t vcoreid);
void disable_notifs(uint32_t vcoreid);
void vcore_idle(void);
void ensure_vcore_runs(uint32_t vcoreid);
void cpu_relax_vc(uint32_t vcoreid);
uint32_t get_vcoreid(void);
bool check_vcoreid(const char *str, uint32_t vcoreid);
void __attribute__((noreturn)) vcore_yield_or_restart(void);
void print_hw_tf(struct hw_trapframe *tf);
void print_sw_tf(struct sw_trapframe *sw_tf);
void print_user_context(struct user_context *ctx);
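
/* Example (added for illustration; not part of the original header): a
 * minimal sketch of how a second-level scheduler might switch to multicore
 * mode and ask the kernel for vcores.  Each vcore the kernel grants pops into
 * vcore_entry() (via the arch-specific __kernel_vcore_entry()).  Error
 * handling and 2LS/uthread setup are glossed over; whether vcore_lib_init()
 * must be called explicitly and whether nonzero means failure here are
 * assumptions.
 *
 *      static void start_mcp(void)
 *      {
 *              vcore_lib_init();               // per-process vcore state
 *              vcore_change_to_m();            // become a multi-core process
 *              if (vcore_request_more(2))      // ask for two more vcores
 *                      printf("vcore_request_more failed\n");
 *      }
 */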

/* This works so long as we don't dlopen parlib (which we never do) */
#define get_tlsvar_linaddr(_vcoreid, _var)                                     \
({                                                                             \
        uintptr_t vc_tls_desc = (uintptr_t)get_vcpd_tls_desc(_vcoreid);        \
        uintptr_t var_off = (uintptr_t)&_var - (uintptr_t)get_tls_desc();      \
        (typeof(_var) *)(vc_tls_desc + var_off);                               \
})
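
/* Example (added for illustration; not part of the original header): reading
 * another vcore's copy of a __thread variable through its linear address.
 * 'example_counter' is a hypothetical __thread variable.
 *
 *      extern __thread int example_counter;
 *
 *      int peek_counter_of(uint32_t vcoreid)
 *      {
 *              int *addr = get_tlsvar_linaddr(vcoreid, example_counter);
 *              return *addr;
 *      }
 */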

/* Static inlines */
static inline uint32_t max_vcores(void)
{
        return MAX(1, __procinfo.max_vcores);
}

static inline uint32_t num_vcores(void)
{
        return __procinfo.num_vcores;
}

static inline int vcore_id(void)
{
        return __vcoreid;
}

static inline bool in_vcore_context(void)
{
        return __vcore_context;
}

static inline bool in_multi_mode(void)
{
        return __procinfo.is_mcp;
}

/* Only call this if you know what you are doing. */
static inline void __enable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = FALSE;
}

static inline void __disable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = TRUE;
}

static inline bool notif_is_enabled(uint32_t vcoreid)
{
        return !vcpd_of(vcoreid)->notif_disabled;
}

static inline bool vcore_is_mapped(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].valid;
}

/* We could also check for VC_K_LOCK, but that's a bit much. */
static inline bool vcore_is_preempted(uint32_t vcoreid)
{
        struct preempt_data *vcpd = vcpd_of(vcoreid);
        return atomic_read(&vcpd->flags) & VC_PREEMPTED;
}

static inline struct preempt_data *vcpd_of(uint32_t vcoreid)
{
        return &__procdata.vcore_preempt_data[vcoreid];
}

/* Uthreads can call this if they care whether a preemption is coming.  If a
 * preempt is incoming and you are in uthread context, this returns TRUE.  A
 * reasonable response for a uthread is to yield; vcore_entry will deal with
 * the pending preempt.
 *
 * If you call this from vcore context, it will do nothing.  In general, it's
 * not safe to just yield (or do whatever you plan on doing) from arbitrary
 * places in vcore context, so we just lie about PP. */
static inline bool preempt_is_pending(uint32_t vcoreid)
{
        if (in_vcore_context())
                return FALSE;
        return __preempt_is_pending(vcoreid);
}
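
/* Example (added for illustration; not part of the original header): a
 * uthread noticing a pending preempt and getting out of the way so that
 * vcore_entry() can handle it.  my_2ls_yield() is a hypothetical stand-in
 * for whatever yield primitive the 2LS provides (e.g. uthread_yield()).
 *
 *      if (!in_vcore_context() && preempt_is_pending(vcore_id())) {
 *              // yield back to the 2LS; vcore_entry will see the preempt
 *              my_2ls_yield();
 *      }
 */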

static inline bool __preempt_is_pending(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].preempt_pending;
}

/* The kernel interface uses uintptr_t, but we have a lot of older code that
 * uses void *, hence the casting. */
static inline void *get_vcpd_tls_desc(uint32_t vcoreid)
{
        return (void*)__procdata.vcore_preempt_data[vcoreid].vcore_tls_desc;
}

static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc)
{
        __procdata.vcore_preempt_data[vcoreid].vcore_tls_desc = (uintptr_t)tls_desc;
}

static inline uint64_t vcore_account_resume_ticks(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].resume_ticks;
}

static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_resume_ticks(vcoreid));
}

static inline uint64_t vcore_account_total_ticks(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].total_ticks;
}

static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_total_ticks(vcoreid));
}

static inline uint64_t vcore_account_uptime_ticks(uint32_t vcoreid)
{
        uint64_t resume = __procinfo.vcoremap[vcoreid].resume_ticks;
        uint64_t total = __procinfo.vcoremap[vcoreid].total_ticks;
        uint64_t now = read_tsc();
        return now - resume + total;
}

static inline uint64_t vcore_account_uptime_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_uptime_ticks(vcoreid));
}

#ifndef __PIC__

#define begin_safe_access_tls_vars()

#define end_safe_access_tls_vars()

#else

#include <features.h>

/* These macro acrobatics trick the compiler into not caching the (linear)
 * address of TLS variables across loads/stores of the TLS descriptor, in lieu
 * of a "TLS cmb()". */
#define begin_safe_access_tls_vars()                                           \
{                                                                              \
        void __attribute__((noinline, optimize("O0")))                         \
        safe_access_tls_var_internal() {                                       \
                asm("");                                                       \

#define end_safe_access_tls_vars()                                             \
        } safe_access_tls_var_internal();                                      \
}

#endif // __PIC__

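/* Example (added for illustration; not part of the original header): guarding
 * a __thread access that happens right after switching the TLS descriptor, so
 * the compiler cannot reuse an address computed against the old TLS.
 * 'example_flag', 'other_tls', and 'orig_tls' are hypothetical.
 *
 *      set_tls_desc(other_tls);
 *      begin_safe_access_tls_vars();
 *      example_flag = TRUE;            // resolved against other_tls
 *      end_safe_access_tls_vars();
 *      set_tls_desc(orig_tls);
 */
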
/* Switches into the TLS 'tls_desc'.  Capable of being called from either
 * uthread or vcore context.  Pairs with end_access_tls_vars(). */
#define begin_access_tls_vars(tls_desc)                                        \
{                                                                              \
        struct uthread *caller;                                                \
        uint32_t vcoreid;                                                      \
        void *temp_tls_desc;                                                   \
        bool invcore = in_vcore_context();                                     \
        if (!invcore) {                                                        \
                caller = current_uthread;                                      \
                /* If you have no current_uthread, you might be called too early in the
                 * process's lifetime.  Make sure something like uthread_slim_init() has
                 * been run. */                                                \
                assert(caller);                                                \
                /* We need to disable notifs here (in addition to not migrating), since
                 * we could get interrupted when we're in the other TLS, and when the
                 * vcore restarts us, it will put us in our old TLS, not the one we were
                 * in when we were interrupted.  We need to not migrate, since once we
                 * know the vcoreid, we depend on being on the same vcore throughout.*/\
                caller->flags |= UTHREAD_DONT_MIGRATE;                         \
                /* Not concerned about cross-core memory ordering, so no CPU mbs needed.
                 * The cmb is to prevent the compiler from issuing the vcore read before
                 * the DONT_MIGRATE write. */                                  \
                cmb();                                                         \
                vcoreid = vcore_id();                                          \
                disable_notifs(vcoreid);                                       \
        } else { /* vcore context */                                           \
                vcoreid = vcore_id();                                          \
        }                                                                      \
        temp_tls_desc = get_tls_desc();                                        \
        set_tls_desc(tls_desc);                                                \
        begin_safe_access_tls_vars();

#define end_access_tls_vars()                                                  \
        end_safe_access_tls_vars();                                            \
        set_tls_desc(temp_tls_desc);                                           \
        if (!invcore) {                                                        \
                /* Note we reenable migration before enabling notifs, which is the
                 * reverse of the order in which we disabled them.  We must reenable
                 * migration before enabling notifs.  See 6c7fb12 and 5e4825eb4 for
                 * details. */                                                 \
                caller->flags &= ~UTHREAD_DONT_MIGRATE;                        \
                cmb();  /* turn off DONT_MIGRATE before enabling notifs */     \
                enable_notifs(vcoreid);                                        \
        }                                                                      \
}
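
/* Example (added for illustration; not part of the original header): reading
 * a __thread variable out of another vcore's TLS with the begin/end pair
 * above.  'example_flag' is a hypothetical __thread variable; the target
 * vcore's TLS must stay valid for the duration of the access.
 *
 *      extern __thread bool example_flag;
 *
 *      bool read_flag_of(uint32_t target_vc)
 *      {
 *              bool ret;
 *              begin_access_tls_vars(get_vcpd_tls_desc(target_vc));
 *              ret = example_flag;
 *              end_access_tls_vars();
 *              return ret;
 *      }
 */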

#define safe_set_tls_var(name, val)                                            \
({                                                                             \
        begin_safe_access_tls_vars();                                          \
        name = val;                                                            \
        end_safe_access_tls_vars();                                            \
})

#define safe_get_tls_var(name)                                                 \
({                                                                             \
        typeof(name) __val;                                                    \
        begin_safe_access_tls_vars();                                          \
        __val = name;                                                          \
        end_safe_access_tls_vars();                                            \
        __val;                                                                 \
})

#define vcore_set_tls_var(name, val)                                           \
({                                                                             \
        typeof(val) __val = val;                                               \
        begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                     \
        name = __val;                                                          \
        end_access_tls_vars();                                                 \
})

#define vcore_get_tls_var(name)                                                \
({                                                                             \
        typeof(name) val;                                                      \
        begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                     \
        val = name;                                                            \
        end_access_tls_vars();                                                 \
        val;                                                                   \
})

__END_DECLS