user/parlib/include/parlib/vcore.h
#pragma once

#include <parlib/arch/vcore.h>
#include <parlib/arch/atomic.h>
#include <sys/param.h>
#include <string.h>
#include <parlib/timing.h>
#include <parlib/common.h>

__BEGIN_DECLS

#define TRANSITION_STACK_PAGES 2
#define TRANSITION_STACK_SIZE (TRANSITION_STACK_PAGES*PGSIZE)

/* Defined in vcore.c */
void __attribute__((noreturn)) vcore_entry(void);
extern __thread bool __vcore_context;
extern __thread int __vcoreid;
extern __thread struct syscall __vcore_one_sysc;        /* see sys_change_vcore */

/* Arch specific entry from the kernel */
void __attribute__((noreturn)) __kernel_vcore_entry(void);

/* Vcore API functions */
static inline uint32_t max_vcores(void);
static inline uint32_t num_vcores(void);
static inline int vcore_id(void);
static inline bool in_vcore_context(void);
static inline bool in_multi_mode(void);
static inline void __enable_notifs(uint32_t vcoreid);
static inline void __disable_notifs(uint32_t vcoreid);
static inline bool notif_is_enabled(uint32_t vcoreid);
static inline bool vcore_is_mapped(uint32_t vcoreid);
static inline bool vcore_is_preempted(uint32_t vcoreid);
static inline struct preempt_data *vcpd_of(uint32_t vcoreid);
static inline bool preempt_is_pending(uint32_t vcoreid);
static inline bool __preempt_is_pending(uint32_t vcoreid);
static inline void *get_vcpd_tls_desc(uint32_t vcoreid);
static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc);
static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid);
static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid);
static inline void cpu_relax_any(void);
void vcore_lib_init(void);
bool __in_fake_parlib(void);
void vcore_change_to_m(void);
void vcore_request_more(long nr_new_vcores);
void vcore_request_total(long nr_vcores_wanted);
void vcore_yield(bool preempt_pending);
void vcore_reenter(void (*entry_func)(void));
void enable_notifs(uint32_t vcoreid);
void disable_notifs(uint32_t vcoreid);
void vcore_idle(void);
void ensure_vcore_runs(uint32_t vcoreid);
void cpu_relax_vc(uint32_t vcoreid);
uint32_t get_vcoreid(void);
bool check_vcoreid(const char *str, uint32_t vcoreid);
void __attribute__((noreturn)) vcore_yield_or_restart(void);
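
/* Illustrative sketch (not part of this header): a second-level scheduler
 * typically switches the process to MCP mode and then asks the kernel for
 * vcores; each granted vcore starts running in vcore_entry().  Bring-up is
 * owned by the 2LS (e.g. the uthread/pthread code), so treat the order below
 * as a rough guess rather than a recipe.
 *
 *        vcore_change_to_m();            // become a multi-core process (MCP)
 *        vcore_request_total(4);         // ask for 4 vcores in total
 *        // ... schedule uthreads on the vcores we were granted ...
 *        vcore_yield(FALSE);             // hand a vcore back when it goes idle
 */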

/* This works so long as we don't dlopen parlib (which we never do) */
#define get_tlsvar_linaddr(_vcoreid, _var)                                     \
({                                                                             \
        uintptr_t vc_tls_desc = (uintptr_t)get_vcpd_tls_desc(_vcoreid);            \
        uintptr_t var_off = (uintptr_t)&_var - (uintptr_t)get_tls_desc();          \
        (typeof(_var) *)(vc_tls_desc + var_off);                                   \
})
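
/* Illustrative sketch (hypothetical TLS variable name): reading another
 * vcore's copy of a TLS variable through its linear address.  Plain loads and
 * stores through the returned pointer are not synchronized with the vcore
 * that owns the TLS.
 *
 *        extern __thread int __hypothetical_counter;
 *        int *remote = get_tlsvar_linaddr(1, __hypothetical_counter);
 *        printf("vcore 1's counter: %d\n", *remote);
 */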

/* Static inlines */
static inline uint32_t max_vcores(void)
{
        return MAX(1, __procinfo.max_vcores);
}

static inline uint32_t num_vcores(void)
{
        return __procinfo.num_vcores;
}

static inline int vcore_id(void)
{
        return __vcoreid;
}

static inline bool in_vcore_context(void)
{
        return __vcore_context;
}

static inline bool in_multi_mode(void)
{
        return __procinfo.is_mcp;
}

/* Only call this if you know what you are doing. */
static inline void __enable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = FALSE;
}

static inline void __disable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = TRUE;
}

static inline bool notif_is_enabled(uint32_t vcoreid)
{
        return !vcpd_of(vcoreid)->notif_disabled;
}

static inline bool vcore_is_mapped(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].valid;
}

/* We could also check for VC_K_LOCK, but that's a bit much. */
static inline bool vcore_is_preempted(uint32_t vcoreid)
{
        struct preempt_data *vcpd = vcpd_of(vcoreid);
        return atomic_read(&vcpd->flags) & VC_PREEMPTED;
}

static inline struct preempt_data *vcpd_of(uint32_t vcoreid)
{
        return &__procdata.vcore_preempt_data[vcoreid];
}

/* Uthreads can call this in case they care whether a preemption is coming.  If
 * a preempt is incoming and you are in uthread context, this will return TRUE.
 * A reasonable response for a uthread is to yield, and vcore_entry will deal
 * with the preempt pending.
 *
 * If you call this from vcore context, it will do nothing.  In general, it's
 * not safe to just yield (or do whatever you plan on doing) from arbitrary
 * places in vcore context.  So we just lie about PP. */
static inline bool preempt_is_pending(uint32_t vcoreid)
{
        if (in_vcore_context())
                return FALSE;
        return __preempt_is_pending(vcoreid);
}
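
/* Illustrative sketch: a uthread in a long-running loop can poll for a pending
 * preemption and yield back to its 2LS, letting vcore_entry() handle the
 * preemption cleanly.  have_work(), uth_do_work(), and the yield call are
 * placeholders for whatever the 2LS provides (e.g. pthread_yield() for the
 * pthread 2LS).
 *
 *        while (have_work()) {
 *                uth_do_work();
 *                if (preempt_is_pending(vcore_id()))
 *                        pthread_yield();
 *        }
 */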

static inline bool __preempt_is_pending(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].preempt_pending;
}

/* The kernel interface uses uintptr_t, but we have a lot of older code that
 * uses void *, hence the casting. */
static inline void *get_vcpd_tls_desc(uint32_t vcoreid)
{
        return (void*)__procdata.vcore_preempt_data[vcoreid].vcore_tls_desc;
}

static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc)
{
        __procdata.vcore_preempt_data[vcoreid].vcore_tls_desc = (uintptr_t)tls_desc;
}

static inline uint64_t vcore_account_resume_ticks(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].resume_ticks;
}

static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_resume_ticks(vcoreid));
}

static inline uint64_t vcore_account_total_ticks(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].total_ticks;
}

static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_total_ticks(vcoreid));
}

static inline uint64_t vcore_account_uptime_ticks(uint32_t vcoreid)
{
        uint64_t resume = __procinfo.vcoremap[vcoreid].resume_ticks;
        uint64_t total = __procinfo.vcoremap[vcoreid].total_ticks;
        uint64_t now = read_tsc();
        return now - resume + total;
}

static inline uint64_t vcore_account_uptime_nsec(uint32_t vcoreid)
{
        return tsc2nsec(vcore_account_uptime_ticks(vcoreid));
}
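
/* Illustrative sketch: the accounting helpers can show how much of a
 * wall-clock interval a vcore was actually online (e.g. to spot time lost to
 * preemption).  do_hypothetical_work() is a placeholder.
 *
 *        uint64_t online_0 = vcore_account_uptime_nsec(vcoreid);
 *        uint64_t wall_0 = tsc2nsec(read_tsc());
 *        do_hypothetical_work();
 *        uint64_t online_nsec = vcore_account_uptime_nsec(vcoreid) - online_0;
 *        uint64_t wall_nsec = tsc2nsec(read_tsc()) - wall_0;
 *        // online_nsec / wall_nsec is the fraction of the interval spent online
 */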

static inline void cpu_relax_any(void)
{
        return cpu_relax_vc(vcore_id());
}

#ifndef __PIC__

#define begin_safe_access_tls_vars()

#define end_safe_access_tls_vars()

#else

#include <features.h>

/* These macro acrobatics trick the compiler into not caching the (linear)
 * address of TLS variables across loads/stores of the TLS descriptor, in lieu
 * of a "TLS cmb()". */
#define begin_safe_access_tls_vars()                                           \
{                                                                              \
        void __attribute__((noinline, optimize("O0")))                             \
        safe_access_tls_var_internal() {                                           \
                asm("");                                                               \

#define end_safe_access_tls_vars()                                             \
        } safe_access_tls_var_internal();                                          \
}

#endif // __PIC__

/* Switches into the TLS 'tls_desc'.  Capable of being called from either
 * uthread or vcore context.  Pairs with end_access_tls_vars(). */
#define begin_access_tls_vars(tls_desc)                                        \
{                                                                              \
        struct uthread *caller;                                                    \
        uint32_t vcoreid;                                                          \
        void *temp_tls_desc;                                                       \
        bool invcore = in_vcore_context();                                         \
        if (!invcore) {                                                            \
                caller = current_uthread;                                              \
                /* If you have no current_uthread, you might be called too early in the
                 * process's lifetime.  Make sure something like uthread_slim_init() has
                 * been run. */                                                        \
                assert(caller);                                                        \
                /* We need to disable notifs here (in addition to not migrating), since
                 * we could get interrupted when we're in the other TLS, and when the
                 * vcore restarts us, it will put us in our old TLS, not the one we were
                 * in when we were interrupted.  We need to not migrate, since once we
                 * know the vcoreid, we depend on being on the same vcore throughout.*/\
                caller->flags |= UTHREAD_DONT_MIGRATE;                                 \
                /* Not concerned about cross-core memory ordering, so no CPU mbs needed.
                 * The cmb is to prevent the compiler from issuing the vcore read before
                 * the DONT_MIGRATE write. */                                          \
                cmb();                                                                 \
                vcoreid = vcore_id();                                                  \
                disable_notifs(vcoreid);                                               \
        } else { /* vcore context */                                               \
                vcoreid = vcore_id();                                                  \
        }                                                                          \
        temp_tls_desc = get_tls_desc();                                            \
        set_tls_desc(tls_desc);                                                    \
        begin_safe_access_tls_vars();

#define end_access_tls_vars()                                                  \
        end_safe_access_tls_vars();                                                \
        set_tls_desc(temp_tls_desc);                                               \
        if (!invcore) {                                                            \
                /* Note we reenable migration before enabling notifs, which is reverse
                 * from how we disabled notifs.  We must reenable migration before
                 * enabling notifs.  See 6c7fb12 and 5e4825eb4 for details. */         \
                caller->flags &= ~UTHREAD_DONT_MIGRATE;                                \
                cmb();  /* turn off DONT_MIGRATE before enabling notifs */             \
                enable_notifs(vcoreid);                                                \
        }                                                                          \
}
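
/* Illustrative sketch: begin/end_access_tls_vars() bracket a region that runs
 * with some other TLS descriptor installed, e.g. to poke a few of a target
 * vcore's TLS variables.  The variable names below are hypothetical.
 *
 *        begin_access_tls_vars(get_vcpd_tls_desc(target_vcoreid));
 *        __hypothetical_flag = TRUE;
 *        __hypothetical_count = 0;
 *        end_access_tls_vars();
 */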

#define safe_set_tls_var(name, val)                                            \
({                                                                             \
        begin_safe_access_tls_vars();                                              \
        name = val;                                                                \
        end_safe_access_tls_vars();                                                \
})

#define safe_get_tls_var(name)                                                 \
({                                                                             \
        typeof(name) __val;                                                        \
        begin_safe_access_tls_vars();                                              \
        __val = name;                                                              \
        end_safe_access_tls_vars();                                                \
        __val;                                                                     \
})

#define vcore_set_tls_var(name, val)                                           \
({                                                                             \
        typeof(val) __val = val;                                                   \
        begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
        name = __val;                                                              \
        end_access_tls_vars();                                                     \
})

#define vcore_get_tls_var(name)                                                \
({                                                                             \
        typeof(name) val;                                                          \
        begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
        val = name;                                                                \
        end_access_tls_vars();                                                     \
        val;                                                                       \
})
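
/* Note: vcore_set_tls_var() and vcore_get_tls_var() pick up a 'vcoreid'
 * variable from the calling scope rather than taking it as a parameter.
 * Illustrative sketch (hypothetical TLS variable name):
 *
 *        uint32_t vcoreid = 2;
 *        vcore_set_tls_var(__hypothetical_flag, TRUE);
 *        bool flag = vcore_get_tls_var(__hypothetical_flag);
 */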

__END_DECLS