Removes unused functions from Parlib's arch.h
[akaros.git] / user / parlib / include / vcore.h
1 #ifndef _VCORE_H
2 #define _VCORE_H
3
4 #ifdef __cplusplus
5 extern "C" {
6 #endif
7
8 #include <arch/vcore.h>
9 #include <arch/atomic.h>
10 #include <sys/param.h>
11 #include <string.h>
12 #include <timing.h>
13
14 /*****************************************************************************/
15 /* TODO: This is a complete hack, but necessary for vcore stuff to work for now
16  * The issue is that exit sometimes calls sys_yield(), and we can't recover from
17  * that properly under our vcore model (we shouldn't though).  We really need to
18  * rethink what sys_yield 'should' do when in multicore mode, or else come up 
19  * with a different syscall entirely. */
20 #include <stdlib.h>
21 #include <unistd.h>
22 #undef exit
23 #define exit(status) _exit(status)
24 /*****************************************************************************/
25
26 #define LOG2_MAX_VCORES 6
27 #define MAX_VCORES (1 << LOG2_MAX_VCORES)
28
29 #define TRANSITION_STACK_PAGES 2
30 #define TRANSITION_STACK_SIZE (TRANSITION_STACK_PAGES*PGSIZE)
31
32 /* Defined by glibc; Must be implemented by a user level threading library */
33 extern void vcore_entry();
34 /* Defined in glibc's start.c */
35 extern __thread bool __vcore_context;
36 extern __thread int __vcoreid;
37 /* Defined in vcore.c */
38 extern __thread struct syscall __vcore_one_sysc;        /* see sys_change_vcore */
39
40 /* Vcore API functions */
41 static inline uint32_t max_vcores(void);
42 static inline uint32_t num_vcores(void);
43 static inline int vcore_id(void);
44 static inline bool in_vcore_context(void);
45 static inline bool in_multi_mode(void);
46 static inline void __enable_notifs(uint32_t vcoreid);
47 static inline void __disable_notifs(uint32_t vcoreid);
48 static inline bool notif_is_enabled(uint32_t vcoreid);
49 static inline bool vcore_is_mapped(uint32_t vcoreid);
50 static inline bool vcore_is_preempted(uint32_t vcoreid);
51 static inline struct preempt_data *vcpd_of(uint32_t vcoreid);
52 static inline bool preempt_is_pending(uint32_t vcoreid);
53 static inline bool __preempt_is_pending(uint32_t vcoreid);
54 static inline void *get_vcpd_tls_desc(uint32_t vcoreid);
55 static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc);
56 static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid);
57 static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid);
58 void vcore_init(void);
59 void vcore_event_init(void);
60 void vcore_change_to_m(void);
61 int vcore_request(long nr_new_vcores);
62 void vcore_yield(bool preempt_pending);
63 void vcore_reenter(void (*entry_func)(void));
64 void enable_notifs(uint32_t vcoreid);
65 void disable_notifs(uint32_t vcoreid);
66 void vcore_idle(void);
67 void ensure_vcore_runs(uint32_t vcoreid);
68 void cpu_relax_vc(uint32_t vcoreid);
69 uint32_t get_vcoreid(void);
70 bool check_vcoreid(const char *str, uint32_t vcoreid);
71
72 /* Static inlines */
73 static inline uint32_t max_vcores(void)
74 {
75         return MIN(__procinfo.max_vcores, MAX_VCORES);
76 }
77
78 static inline uint32_t num_vcores(void)
79 {
80         return __procinfo.num_vcores;
81 }
82
83 static inline int vcore_id(void)
84 {
85         return __vcoreid;
86 }
87
88 static inline bool in_vcore_context(void)
89 {
90         return __vcore_context;
91 }
92
93 static inline bool in_multi_mode(void)
94 {
95         return __procinfo.is_mcp;
96 }
97
98 /* Only call this if you know what you are doing. */
99 static inline void __enable_notifs(uint32_t vcoreid)
100 {
101         vcpd_of(vcoreid)->notif_disabled = FALSE;
102 }
103
104 static inline void __disable_notifs(uint32_t vcoreid)
105 {
106         vcpd_of(vcoreid)->notif_disabled = TRUE;
107 }
108
109 static inline bool notif_is_enabled(uint32_t vcoreid)
110 {
111         return !vcpd_of(vcoreid)->notif_disabled;
112 }
113
114 static inline bool vcore_is_mapped(uint32_t vcoreid)
115 {
116         return __procinfo.vcoremap[vcoreid].valid;
117 }
118
119 /* We could also check for VC_K_LOCK, but that's a bit much. */
120 static inline bool vcore_is_preempted(uint32_t vcoreid)
121 {
122         struct preempt_data *vcpd = vcpd_of(vcoreid);
123         return atomic_read(&vcpd->flags) & VC_PREEMPTED;
124 }
125
126 static inline struct preempt_data *vcpd_of(uint32_t vcoreid)
127 {
128         return &__procdata.vcore_preempt_data[vcoreid];
129 }
130
131 /* Uthread's can call this in case they care if a preemption is coming.  If a
132  * preempt is incoming, this will return TRUE, if you are in uthread context.  A
133  * reasonable response for a uthread is to yield, and vcore_entry will deal with
134  * the preempt pending.
135  *
136  * If you call this from vcore context, it will do nothing.  In general, it's
137  * not safe to just yield (or do whatever you plan on doing) from arbitrary
138  * places in vcore context.  So we just lie about PP. */
139 static inline bool preempt_is_pending(uint32_t vcoreid)
140 {
141         if (in_vcore_context())
142                 return FALSE;
143         return __preempt_is_pending(vcoreid);
144 }
145
146 static inline bool __preempt_is_pending(uint32_t vcoreid)
147 {
148         return __procinfo.vcoremap[vcoreid].preempt_pending;
149 }
150
151 /* The kernel interface uses uintptr_t, but we have a lot of older code that
152  * uses void *, hence the casting. */
153 static inline void *get_vcpd_tls_desc(uint32_t vcoreid)
154 {
155         return (void*)__procdata.vcore_preempt_data[vcoreid].vcore_tls_desc;
156 }
157
158 static inline void set_vcpd_tls_desc(uint32_t vcoreid, void *tls_desc)
159 {
160         __procdata.vcore_preempt_data[vcoreid].vcore_tls_desc = (uintptr_t)tls_desc;
161 }
162
163 static inline uint64_t vcore_account_resume_ticks(uint32_t vcoreid)
164 {
165         return __procinfo.vcoremap[vcoreid].resume_ticks;
166 }
167
168 static inline uint64_t vcore_account_resume_nsec(uint32_t vcoreid)
169 {
170         return tsc2nsec(vcore_account_resume_ticks(vcoreid));
171 }
172
173 static inline uint64_t vcore_account_total_ticks(uint32_t vcoreid)
174 {
175         return __procinfo.vcoremap[vcoreid].total_ticks;
176 }
177
178 static inline uint64_t vcore_account_total_nsec(uint32_t vcoreid)
179 {
180         return tsc2nsec(vcore_account_total_ticks(vcoreid));
181 }
182
183 static inline uint64_t vcore_account_uptime_ticks(uint32_t vcoreid)
184 {
185         uint64_t resume = __procinfo.vcoremap[vcoreid].resume_ticks; 
186         uint64_t total = __procinfo.vcoremap[vcoreid].total_ticks; 
187         uint64_t now = read_tsc();
188         return now - resume + total;
189 }
190
191 static inline uint64_t vcore_account_uptime_nsec(uint32_t vcoreid)
192 {
193         return tsc2nsec(vcore_account_uptime_ticks(vcoreid));
194 }
195
#ifndef __PIC__

/* Non-PIC (static) builds: the compiler computes TLS addresses directly, so
 * nothing special is needed around a TLS-descriptor switch — both are
 * no-ops. */
#define begin_safe_access_tls_vars()

#define end_safe_access_tls_vars()

#else

#include <features.h>
#if __GNUC_PREREQ(4,4)

/* These macro acrobatics trick the compiler into not caching the (linear)
 * address of TLS variables across loads/stores of the TLS descriptor, in lieu
 * of a "TLS cmb()".  begin opens a GCC nested function compiled at -O0 (and
 * never inlined); end closes it and calls it, so the TLS accesses in between
 * happen inside that un-optimized body.  The pair must be used together in
 * the same block. */
#define begin_safe_access_tls_vars()                                           \
{                                                                              \
	void __attribute__((noinline, optimize("O0")))                             \
	safe_access_tls_var_internal() {                                           \
		asm("");                                                               \

#define end_safe_access_tls_vars()                                             \
	} safe_access_tls_var_internal();                                          \
}

#else

/* PIC with a pre-4.4 gcc: we cannot generate safe TLS accesses, so abort
 * loudly at runtime. */
#define begin_safe_access_tls_vars()                                           \
	printf("ERROR: For PIC use gcc 4.4 or above for tls support!\n");          \
	printf("ERROR: As a quick fix, recompile your app with -static...\n");     \
	exit(2);

#define end_safe_access_tls_vars()                                             \
	printf("Will never be called because we abort above!");                    \
	exit(2);

#endif //__GNUC_PREREQ
#endif // __PIC__
233
/* Switches into the TLS 'tls_desc'.  Capable of being called from either
 * uthread or vcore context.  Pairs with end_access_tls_vars(), which restores
 * the original TLS and re-enables notifs/migration as needed. */
#define begin_access_tls_vars(tls_desc)                                        \
{                                                                              \
	struct uthread *caller;                                                    \
	uint32_t vcoreid;                                                          \
	void *temp_tls_desc;                                                       \
	bool invcore = in_vcore_context();                                         \
	if (!invcore) {                                                            \
		caller = current_uthread;                                              \
		/* If you have no current_uthread, you might be called too early in the
		 * process's lifetime.  Make sure something like uthread_slim_init() has
		 * been run. */                                                        \
		assert(caller);                                                        \
		/* We need to disable notifs here (in addition to not migrating), since
		 * we could get interrupted when we're in the other TLS, and when the
		 * vcore restarts us, it will put us in our old TLS, not the one we were
		 * in when we were interrupted.  We need to not migrate, since once we
		 * know the vcoreid, we depend on being on the same vcore throughout.*/\
		caller->flags |= UTHREAD_DONT_MIGRATE;                                 \
		/* Not concerned about cross-core memory ordering, so no CPU mbs needed.
		 * The cmb is to prevent the compiler from issuing the vcore read before
		 * the DONT_MIGRATE write. */                                          \
		cmb();                                                                 \
		vcoreid = vcore_id();                                                  \
		disable_notifs(vcoreid);                                               \
	} else { /* vcore context */                                               \
		vcoreid = vcore_id();                                                  \
	}                                                                          \
	temp_tls_desc = get_tls_desc(vcoreid);                                     \
	set_tls_desc(tls_desc, vcoreid);                                           \
	begin_safe_access_tls_vars();

/* Restores the TLS saved by begin_access_tls_vars() and, for uthread callers,
 * re-enables migration and notifs. */
#define end_access_tls_vars()                                                  \
	end_safe_access_tls_vars();                                                \
	set_tls_desc(temp_tls_desc, vcoreid);                                      \
	if (!invcore) {                                                            \
		/* Note we reenable migration before enabling notifs, which is reverse
		 * from how we disabled notifs.  We must enable migration before
		 * enabling notifs.  See 6c7fb12 and 5e4825eb4 for details. */         \
		caller->flags &= ~UTHREAD_DONT_MIGRATE;                                \
		cmb();  /* turn off DONT_MIGRATE before enabling notifs */             \
		if (in_multi_mode())                                                   \
			enable_notifs(vcoreid);                                            \
	}                                                                          \
}
280
/* Assigns 'val' to the TLS variable 'name' inside a safe-access region, so
 * the compiler does not reuse a stale cached TLS address. */
#define safe_set_tls_var(name, val)                                            \
({                                                                             \
	begin_safe_access_tls_vars();                                              \
	name = val;                                                                \
	end_safe_access_tls_vars();                                                \
})

/* Reads the TLS variable 'name' inside a safe-access region and yields its
 * value (statement expression). */
#define safe_get_tls_var(name)                                                 \
({                                                                             \
	typeof(name) __val;                                                        \
	begin_safe_access_tls_vars();                                              \
	__val = name;                                                              \
	end_safe_access_tls_vars();                                                \
	__val;                                                                     \
})
296
/* Sets TLS variable 'name' to 'val' in the vcore's TLS (not the caller's).
 * NOTE(review): captures 'vcoreid' from the caller's scope — the caller must
 * have a 'vcoreid' in scope naming the target vcore. */
#define vcore_set_tls_var(name, val)                                           \
({                                                                             \
	typeof(val) __val = val;                                                   \
	begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
	name = __val;                                                              \
	end_access_tls_vars();                                                     \
})

/* Reads TLS variable 'name' from the vcore's TLS and yields its value.
 * NOTE(review): also captures 'vcoreid' from the caller's scope. */
#define vcore_get_tls_var(name)                                                \
({                                                                             \
	typeof(name) val;                                                          \
	begin_access_tls_vars(get_vcpd_tls_desc(vcoreid));                         \
	val = name;                                                                \
	end_access_tls_vars();                                                     \
	val;                                                                       \
})
313
314 #ifdef __cplusplus
315 }
316 #endif
317
318 #endif