01df275f13108d9d5c34ca13dfcd85a5fe596045
[akaros.git] / user / parlib / include / parlib / vcore.h
1 #pragma once
2
3 #include <parlib/arch/vcore.h>
4 #include <parlib/arch/atomic.h>
5 #include <sys/param.h>
6 #include <string.h>
7 #include <parlib/timing.h>
8 #include <parlib/common.h>
9
10 __BEGIN_DECLS
11
/*****************************************************************************/
/* TODO: This is a complete hack, but necessary for vcore stuff to work for now
 * The issue is that exit sometimes calls sys_yield(), and we can't recover from
 * that properly under our vcore model (we shouldn't though).  We really need to
 * rethink what sys_yield 'should' do when in multicore mode, or else come up
 * with a different syscall entirely.
 *
 * The redirect below makes every call to exit() invoke _exit() instead, which
 * terminates immediately (no atexit handlers, no stdio flush) and therefore
 * never reaches the problematic yield in the normal libc exit path. */
#undef exit
extern void _exit (int status);
extern void exit (int __status) __THROW __attribute__ ((__noreturn__));
#define exit(status) _exit(status)
/*****************************************************************************/

/* Size of each vcore's transition (entry) stack: 2 pages.  PGSIZE comes from
 * the arch headers included above. */
#define TRANSITION_STACK_PAGES 2
#define TRANSITION_STACK_SIZE (TRANSITION_STACK_PAGES*PGSIZE)
26
/* Defined in vcore.c */
void __attribute__((noreturn)) vcore_entry(void);
/* Per-thread flag: TRUE while this thread is executing in vcore context
 * (as opposed to uthread context). */
extern __thread bool __vcore_context;
/* Per-thread copy of the id of the vcore whose TLS this is. */
extern __thread int __vcoreid;
extern __thread struct syscall __vcore_one_sysc;        /* see sys_change_vcore */

/* Arch specific entry from the kernel */
void __attribute__((noreturn)) __kernel_vcore_entry(void);
35
/* Vcore API functions */
/* Static inline accessors; definitions follow later in this header. */
static inline uint32_t max_vcores(void);
static inline uint32_t num_vcores(void);
static inline int vcore_id(void);
static inline bool in_vcore_context(void);
static inline bool in_multi_mode(void);
static inline void __enable_notifs(uint32_t vcoreid);
static inline void __disable_notifs(uint32_t vcoreid);
static inline bool notif_is_enabled(uint32_t vcoreid);
static inline bool vcore_is_mapped(uint32_t vcoreid);
static inline bool vcore_is_preempted(uint32_t vcoreid);
static inline struct preempt_data *vcpd_of(uint32_t vcoreid);
static inline bool preempt_is_pending(uint32_t vcoreid);
static inline bool __preempt_is_pending(uint32_t vcoreid);
static inline void *get_vcpd_tls_desc(uint32_t vcoreid);
static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc);
static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid);
static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid);
/* Out-of-line API; implemented in vcore.c. */
void vcore_lib_init(void);
void vcore_change_to_m(void);
int vcore_request(long nr_new_vcores);
void vcore_yield(bool preempt_pending);
void vcore_reenter(void (*entry_func)(void));
void enable_notifs(uint32_t vcoreid);
void disable_notifs(uint32_t vcoreid);
void vcore_idle(void);
void ensure_vcore_runs(uint32_t vcoreid);
void cpu_relax_vc(uint32_t vcoreid);
uint32_t get_vcoreid(void);
bool check_vcoreid(const char *str, uint32_t vcoreid);
/* Debug helpers for dumping saved contexts. */
void print_hw_tf(struct hw_trapframe *tf);
void print_sw_tf(struct sw_trapframe *sw_tf);
void print_user_context(struct user_context *ctx);
69
/* This works so long as we don't dlopen parlib (which we never do) */
/* Yields a pointer to TLS variable _var as it exists in vcore _vcoreid's TLS
 * region: computes _var's offset within the current TLS segment and adds it
 * to the target vcore's TLS descriptor base.  Assumes every TLS region shares
 * one layout, which holds because parlib is never dlopen()ed (see above). */
#define get_tlsvar_linaddr(_vcoreid, _var)                                     \
({                                                                             \
	uintptr_t vc_tls_desc = (uintptr_t)get_vcpd_tls_desc(_vcoreid);            \
	uintptr_t var_off = (uintptr_t)&_var - (uintptr_t)get_tls_desc();          \
	(typeof(_var) *)(vc_tls_desc + var_off);                                   \
})
77
78 /* Static inlines */
79 static inline uint32_t max_vcores(void)
80 {
81         return MAX(1, __procinfo.max_vcores);
82 }
83
84 static inline uint32_t num_vcores(void)
85 {
86         return __procinfo.num_vcores;
87 }
88
89 static inline int vcore_id(void)
90 {
91         return __vcoreid;
92 }
93
94 static inline bool in_vcore_context(void)
95 {
96         return __vcore_context;
97 }
98
99 static inline bool in_multi_mode(void)
100 {
101         return __procinfo.is_mcp;
102 }
103
104 /* Only call this if you know what you are doing. */
105 static inline void __enable_notifs(uint32_t vcoreid)
106 {
107         vcpd_of(vcoreid)->notif_disabled = FALSE;
108 }
109
110 static inline void __disable_notifs(uint32_t vcoreid)
111 {
112         vcpd_of(vcoreid)->notif_disabled = TRUE;
113 }
114
115 static inline bool notif_is_enabled(uint32_t vcoreid)
116 {
117         return !vcpd_of(vcoreid)->notif_disabled;
118 }
119
120 static inline bool vcore_is_mapped(uint32_t vcoreid)
121 {
122         return __procinfo.vcoremap[vcoreid].valid;
123 }
124
125 /* We could also check for VC_K_LOCK, but that's a bit much. */
126 static inline bool vcore_is_preempted(uint32_t vcoreid)
127 {
128         struct preempt_data *vcpd = vcpd_of(vcoreid);
129         return atomic_read(&vcpd->flags) & VC_PREEMPTED;
130 }
131
132 static inline struct preempt_data *vcpd_of(uint32_t vcoreid)
133 {
134         return &__procdata.vcore_preempt_data[vcoreid];
135 }
136
137 /* Uthread's can call this in case they care if a preemption is coming.  If a
138  * preempt is incoming, this will return TRUE, if you are in uthread context.  A
139  * reasonable response for a uthread is to yield, and vcore_entry will deal with
140  * the preempt pending.
141  *
142  * If you call this from vcore context, it will do nothing.  In general, it's
143  * not safe to just yield (or do whatever you plan on doing) from arbitrary
144  * places in vcore context.  So we just lie about PP. */
145 static inline bool preempt_is_pending(uint32_t vcoreid)
146 {
147         if (in_vcore_context())
148                 return FALSE;
149         return __preempt_is_pending(vcoreid);
150 }
151
152 static inline bool __preempt_is_pending(uint32_t vcoreid)
153 {
154         return __procinfo.vcoremap[vcoreid].preempt_pending;
155 }
156
157 /* The kernel interface uses uintptr_t, but we have a lot of older code that
158  * uses void *, hence the casting. */
159 static inline void *get_vcpd_tls_desc(uint32_t vcoreid)
160 {
161         return (void*)__procdata.vcore_preempt_data[vcoreid].vcore_tls_desc;
162 }
163
164 static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc)
165 {
166         __procdata.vcore_preempt_data[vcoreid].vcore_tls_desc = (uintptr_t)tls_desc;
167 }
168
169 static inline uint64_t vcore_account_resume_ticks(uint32_t vcoreid)
170 {
171         return __procinfo.vcoremap[vcoreid].resume_ticks;
172 }
173
/* Same as vcore_account_resume_ticks(), converted to nanoseconds. */
static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid)
{
	uint64_t ticks = vcore_account_resume_ticks(vcoreid);

	return tsc2nsec(ticks);
}
178
179 static inline uint64_t vcore_account_total_ticks(uint32_t vcoreid)
180 {
181         return __procinfo.vcoremap[vcoreid].total_ticks;
182 }
183
/* Same as vcore_account_total_ticks(), converted to nanoseconds. */
static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid)
{
	uint64_t ticks = vcore_account_total_ticks(vcoreid);

	return tsc2nsec(ticks);
}
188
189 static inline uint64_t vcore_account_uptime_ticks(uint32_t vcoreid)
190 {
191         uint64_t resume = __procinfo.vcoremap[vcoreid].resume_ticks; 
192         uint64_t total = __procinfo.vcoremap[vcoreid].total_ticks; 
193         uint64_t now = read_tsc();
194         return now - resume + total;
195 }
196
/* Same as vcore_account_uptime_ticks(), converted to nanoseconds. */
static inline uint64_t vcore_account_uptime_nsec(uint32_t vcoreid)
{
	uint64_t ticks = vcore_account_uptime_ticks(vcoreid);

	return tsc2nsec(ticks);
}
201
#ifndef __PIC__

/* Non-PIC builds: TLS variable addresses are not cached in a way that needs
 * defeating, so these are no-ops. */
#define begin_safe_access_tls_vars()

#define end_safe_access_tls_vars()

#else

#include <features.h>

/* These macro acrobatics trick the compiler into not caching the (linear)
 * address of TLS variables across loads/stores of the TLS descriptor, in lieu
 * of a "TLS cmb()".  The bracketed code runs inside a noinline, -O0 nested
 * function (a GCC extension), so the compiler cannot hoist or reuse TLS
 * address computations across the TLS-descriptor switch.  begin/end must
 * appear as a textually matched pair in the same block. */
#define begin_safe_access_tls_vars()                                           \
{                                                                              \
	void __attribute__((noinline, optimize("O0")))                             \
	safe_access_tls_var_internal() {                                           \
		asm("");                                                               \

#define end_safe_access_tls_vars()                                             \
	} safe_access_tls_var_internal();                                          \
}

#endif // __PIC__
226
/* Switches into the TLS 'tls_desc'.  Capable of being called from either
 * uthread or vcore context.  Pairs with end_access_tls_vars(), which relies on
 * the locals declared here (caller, vcoreid, temp_tls_desc, invcore) still
 * being in scope — the two macros must bracket one block of code. */
#define begin_access_tls_vars(tls_desc)                                        \
{                                                                              \
	struct uthread *caller;                                                    \
	uint32_t vcoreid;                                                          \
	void *temp_tls_desc;                                                       \
	bool invcore = in_vcore_context();                                         \
	if (!invcore) {                                                            \
		caller = current_uthread;                                              \
		/* If you have no current_uthread, you might be called too early in the
		 * process's lifetime.  Make sure something like uthread_slim_init() has
		 * been run. */                                                        \
		assert(caller);                                                        \
		/* We need to disable notifs here (in addition to not migrating), since
		 * we could get interrupted when we're in the other TLS, and when the
		 * vcore restarts us, it will put us in our old TLS, not the one we were
		 * in when we were interrupted.  We need to not migrate, since once we
		 * know the vcoreid, we depend on being on the same vcore throughout.*/\
		caller->flags |= UTHREAD_DONT_MIGRATE;                                 \
		/* Not concerned about cross-core memory ordering, so no CPU mbs needed.
		 * The cmb is to prevent the compiler from issuing the vcore read before
		 * the DONT_MIGRATE write. */                                          \
		cmb();                                                                 \
		vcoreid = vcore_id();                                                  \
		disable_notifs(vcoreid);                                               \
	} else { /* vcore context */                                               \
		vcoreid = vcore_id();                                                  \
	}                                                                          \
	temp_tls_desc = get_tls_desc();                                            \
	set_tls_desc(tls_desc);                                                    \
	begin_safe_access_tls_vars();
259
/* Restores the TLS saved by begin_access_tls_vars() and, if we came from
 * uthread context, re-allows migration and re-enables notifs.  Uses the
 * locals (temp_tls_desc, invcore, caller, vcoreid) declared by the paired
 * begin_access_tls_vars(). */
#define end_access_tls_vars()                                                  \
	end_safe_access_tls_vars();                                                \
	set_tls_desc(temp_tls_desc);                                               \
	if (!invcore) {                                                            \
		/* Note we reenable migration before enabling notifs, which is reverse
		 * from how we disabled notifs.  We must enable migration before
		 * enabling notifs.  See 6c7fb12 and 5e4825eb4 for details. */         \
		caller->flags &= ~UTHREAD_DONT_MIGRATE;                                \
		cmb();  /* turn off DONT_MIGRATE before enabling notifs */             \
		enable_notifs(vcoreid);                                                \
	}                                                                          \
}
272
/* Assigns 'val' to TLS variable 'name' inside a safe-access region, so the
 * compiler does not use a stale cached TLS address for the store. */
#define safe_set_tls_var(name, val)                                            \
({                                                                             \
	begin_safe_access_tls_vars();                                              \
	name = val;                                                                \
	end_safe_access_tls_vars();                                                \
})
279
/* Reads TLS variable 'name' inside a safe-access region and yields its value
 * (statement-expression result), avoiding stale cached TLS addresses. */
#define safe_get_tls_var(name)                                                 \
({                                                                             \
	typeof(name) __val;                                                        \
	begin_safe_access_tls_vars();                                              \
	__val = name;                                                              \
	end_safe_access_tls_vars();                                                \
})
288
/* Assigns 'val' to TLS variable 'name' in another vcore's TLS.
 * NOTE(review): this macro implicitly captures a variable named 'vcoreid'
 * from the caller's scope to pick the target vcore — callers must have one
 * in scope.  'val' is evaluated once, before the TLS switch. */
#define vcore_set_tls_var(name, val)                                           \
({                                                                             \
	typeof(val) __val = val;                                                   \
	begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
	name = __val;                                                              \
	end_access_tls_vars();                                                     \
})
296
/* Reads TLS variable 'name' from another vcore's TLS and yields its value.
 * NOTE(review): like vcore_set_tls_var, this implicitly captures a variable
 * named 'vcoreid' from the caller's scope to pick the target vcore. */
#define vcore_get_tls_var(name)                                                \
({                                                                             \
	typeof(name) val;                                                          \
	begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
	val = name;                                                                \
	end_access_tls_vars();                                                     \
})
305
306 __END_DECLS