#ifndef _VCORE_H
#define _VCORE_H

#ifdef __cplusplus
extern "C" {
#endif

#include <arch/vcore.h>
#include <arch/atomic.h>
#include <sys/param.h>
#include <string.h>

/*****************************************************************************/
/* TODO: This is a complete hack, but necessary for vcore stuff to work for now.
 * The issue is that exit sometimes calls sys_yield(), and we can't recover from
 * that properly under our vcore model (though we shouldn't have to).  We really
 * need to rethink what sys_yield 'should' do when in multicore mode, or else
 * come up with a different syscall entirely. */
#include <stdlib.h>
#include <unistd.h>
#undef exit
#define exit(status) ros_syscall(SYS_proc_destroy, getpid(), status, 0, 0, 0, 0)
/*****************************************************************************/

#define LOG2_MAX_VCORES 6
#define MAX_VCORES (1 << LOG2_MAX_VCORES)

#define TRANSITION_STACK_PAGES 2
#define TRANSITION_STACK_SIZE (TRANSITION_STACK_PAGES*PGSIZE)

/* Defined by glibc; must be implemented by a user-level threading library */
extern void vcore_entry();
/* Declared in glibc's start.c */
extern __thread bool __vcore_context;
extern __thread int __vcoreid;

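/* A minimal sketch of what a 2LS's vcore_entry() might look like.  The helpers
 * used here (handle_my_events(), run_or_schedule_uthread()) are stand-ins for
 * whatever the threading library actually provides; they are not part of this
 * header, and a real vcore_entry() never returns:
 *
 *    void vcore_entry(void)
 *    {
 *        uint32_t vcoreid = vcore_id();
 *        handle_my_events(vcoreid);       // drain pending notifications
 *        run_or_schedule_uthread();       // pick a uthread and run it
 *    }
 */
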
/* Vcore API functions */
static inline size_t max_vcores(void);
static inline size_t num_vcores(void);
static inline int vcore_id(void);
static inline bool in_vcore_context(void);
static inline bool in_multi_mode(void);
static inline void __enable_notifs(uint32_t vcoreid);
static inline void __disable_notifs(uint32_t vcoreid);
static inline bool notif_is_enabled(uint32_t vcoreid);
static inline bool vcore_is_mapped(uint32_t vcoreid);
static inline bool vcore_is_preempted(uint32_t vcoreid);
static inline struct preempt_data *vcpd_of(uint32_t vcoreid);
static inline bool preempt_is_pending(uint32_t vcoreid);
static inline bool __preempt_is_pending(uint32_t vcoreid);
int vcore_init(void);
void vcore_event_init(void);
void vcore_change_to_m(void);
int vcore_request(long nr_new_vcores);
void vcore_yield(bool preempt_pending);
bool clear_notif_pending(uint32_t vcoreid);
void enable_notifs(uint32_t vcoreid);
void disable_notifs(uint32_t vcoreid);
void vcore_idle(void);
void ensure_vcore_runs(uint32_t vcoreid);
void cpu_relax_vc(uint32_t vcoreid);

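/* Rough sketch of how a 2LS might drive this API at startup.  The ordering
 * shown (init, switch to MCP, then request vcores) is an assumption about
 * typical usage, not something this header enforces:
 *
 *    vcore_init();
 *    vcore_change_to_m();                         // become a multi-core process
 *    vcore_request(max_vcores() - num_vcores());  // ask for additional vcores
 *    // ... later, from vcore_entry(), a vcore with no work can:
 *    vcore_yield(FALSE);
 */
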
/* Static inlines */
static inline size_t max_vcores(void)
{
        return MIN(__procinfo.max_vcores, MAX_VCORES);
}

static inline size_t num_vcores(void)
{
        return __procinfo.num_vcores;
}

static inline int vcore_id(void)
{
        return __vcoreid;
}

static inline bool in_vcore_context(void)
{
        return __vcore_context;
}

static inline bool in_multi_mode(void)
{
        return __procinfo.is_mcp;
}

/* Only call this if you know what you are doing. */
static inline void __enable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = FALSE;
}

static inline void __disable_notifs(uint32_t vcoreid)
{
        vcpd_of(vcoreid)->notif_disabled = TRUE;
}

static inline bool notif_is_enabled(uint32_t vcoreid)
{
        return !vcpd_of(vcoreid)->notif_disabled;
}

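/* Sketch of the usual pattern from uthread context: briefly run with
 * notifications off around work that must not be interrupted and restarted in
 * vcore context.  Prefer the enable_notifs()/disable_notifs() pair declared
 * above; the __-prefixed versions just flip the flag and are only for callers
 * that know exactly what they are doing.
 *
 *    uint32_t vcoreid = vcore_id();
 *    disable_notifs(vcoreid);
 *    // ... short critical section ...
 *    enable_notifs(vcoreid);
 */
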
static inline bool vcore_is_mapped(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].valid;
}

/* We could also check for VC_K_LOCK, but that's a bit much. */
static inline bool vcore_is_preempted(uint32_t vcoreid)
{
        struct preempt_data *vcpd = vcpd_of(vcoreid);
        return atomic_read(&vcpd->flags) & VC_PREEMPTED;
}

static inline struct preempt_data *vcpd_of(uint32_t vcoreid)
{
        return &__procdata.vcore_preempt_data[vcoreid];
}

/* Uthreads can call this if they care whether a preemption is coming.  If a
 * preempt is incoming and you are in uthread context, this will return TRUE.
 * A reasonable response for a uthread is to yield, and vcore_entry will deal
 * with the pending preempt.
 *
 * If you call this from vcore context, it will do nothing.  In general, it's
 * not safe to just yield (or do whatever you plan on doing) from arbitrary
 * places in vcore context.  So we just lie about PP. */
static inline bool preempt_is_pending(uint32_t vcoreid)
{
        if (in_vcore_context())
                return FALSE;
        return __preempt_is_pending(vcoreid);
}

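/* Sketch of a uthread polling loop that backs off when a preempt is pending.
 * try_acquire() and my_2ls_yield() are stand-ins for lock and yield primitives
 * the threading library would provide; they are not part of this header.
 *
 *    while (!try_acquire(&some_lock)) {
 *        if (preempt_is_pending(vcore_id()))
 *            my_2ls_yield();      // let vcore_entry handle the preempt
 *        cpu_relax();
 *    }
 */
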
static inline bool __preempt_is_pending(uint32_t vcoreid)
{
        return __procinfo.vcoremap[vcoreid].preempt_pending;
}


#ifndef __PIC__

#define begin_safe_access_tls_vars()

#define end_safe_access_tls_vars()

#else

#include <features.h>
#if __GNUC_PREREQ(4,4)

/* These macro acrobatics trick the compiler into not caching the (linear)
 * address of TLS variables across loads/stores of the TLS descriptor, in lieu
 * of a "TLS cmb()". */
#define begin_safe_access_tls_vars()                                           \
        void __attribute__((noinline, optimize("O0")))                             \
        safe_access_tls_var_internal() {                                           \
                asm("");                                                               \

#define end_safe_access_tls_vars()                                             \
        } safe_access_tls_var_internal();                                          \

#else

#define begin_safe_access_tls_vars()                                           \
        printf("ERROR: For PIC, use gcc 4.4 or above for TLS support!\n");         \
        printf("ERROR: As a quick fix, recompile your app with -static...\n");     \
        exit(2);

#define end_safe_access_tls_vars()                                             \
        printf("Will never be called because we abort above!\n");                  \
        exit(2);

#endif // __GNUC_PREREQ
#endif // __PIC__

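/* Sketch of how the pair above is meant to be used: bracket any access to TLS
 * variables that happens right after the TLS descriptor may have changed, so
 * the compiler cannot reuse a TLS address computed under the old descriptor.
 * 'some_tls_desc' is a placeholder; __vcore_context is the real TLS variable
 * declared earlier in this file.
 *
 *    set_tls_desc(some_tls_desc, vcore_id());    // switch TLS
 *    begin_safe_access_tls_vars();
 *    __vcore_context = TRUE;                     // access TLS vars safely
 *    end_safe_access_tls_vars();
 */
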
/* Switches into the TLS 'tls_desc'.  Capable of being called from either
 * uthread or vcore context.  Pairs with end_access_tls_vars(). */
#define begin_access_tls_vars(tls_desc)                                        \
{                                                                              \
        struct uthread *caller;                                                    \
        uint32_t vcoreid;                                                          \
        void *temp_tls_desc;                                                       \
        bool invcore = in_vcore_context();                                         \
        if (!invcore) {                                                            \
                caller = current_uthread;                                              \
                /* If you have no current_uthread, you might be called too early in the
                 * process's lifetime.  Make sure something like uthread_slim_init() has
                 * been run. */                                                        \
                assert(caller);                                                        \
                /* We need to disable notifs here (in addition to not migrating), since
                 * we could get interrupted when we're in the other TLS, and when the
                 * vcore restarts us, it will put us in our old TLS, not the one we were
                 * in when we were interrupted.  We need to not migrate, since once we
                 * know the vcoreid, we depend on being on the same vcore throughout. */\
                caller->flags |= UTHREAD_DONT_MIGRATE;                                 \
                /* Not concerned about cross-core memory ordering, so no CPU mbs needed.
                 * The cmb is to prevent the compiler from issuing the vcore read before
                 * the DONT_MIGRATE write. */                                          \
                cmb();                                                                 \
                vcoreid = vcore_id();                                                  \
                disable_notifs(vcoreid);                                               \
        } else { /* vcore context */                                               \
                vcoreid = vcore_id();                                                  \
        }                                                                          \
        temp_tls_desc = get_tls_desc(vcoreid);                                     \
        set_tls_desc(tls_desc, vcoreid);                                           \
        begin_safe_access_tls_vars();

#define end_access_tls_vars()                                                  \
        end_safe_access_tls_vars();                                                \
        set_tls_desc(temp_tls_desc, vcoreid);                                      \
        if (!invcore) {                                                            \
                /* Note we reenable migration before enabling notifs, which is reverse
                 * from how we disabled notifs.  We must reenable migration before
                 * enabling notifs.  See 6c7fb12 and 5e4825eb4 for details. */         \
                caller->flags &= ~UTHREAD_DONT_MIGRATE;                                \
                cmb();  /* turn off DONT_MIGRATE before enabling notifs */             \
                if (in_multi_mode())                                                   \
                        enable_notifs(vcoreid);                                            \
        }                                                                          \
}

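/* Sketch of the intended use of the pair above: temporarily run in some other
 * TLS to read or write its __thread variables, then switch back.  Here
 * 'target_tls_desc' is a placeholder for a TLS descriptor obtained elsewhere
 * (e.g. another uthread's or vcore's); it is not defined in this header.
 *
 *    begin_access_tls_vars(target_tls_desc);
 *    bool ctx = __vcore_context;     // reads the *target* TLS's copy
 *    end_access_tls_vars();
 */
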
#define safe_set_tls_var(name, val)                                            \
({                                                                             \
        begin_safe_access_tls_vars();                                              \
        name = val;                                                                \
        end_safe_access_tls_vars();                                                \
})

#define safe_get_tls_var(name)                                                 \
({                                                                             \
        typeof(name) __val;                                                        \
        begin_safe_access_tls_vars();                                              \
        __val = name;                                                              \
        end_safe_access_tls_vars();                                                \
        __val;                                                                     \
})

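/* Sketch: accessing a __thread variable through the wrappers above so that
 * PIC builds don't reuse a stale TLS address.  'my_tls_counter' is a
 * hypothetical 2LS-defined __thread int, not something declared here.
 *
 *    safe_set_tls_var(my_tls_counter, 0);
 *    int x = safe_get_tls_var(my_tls_counter);
 */
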
#define vcore_set_tls_var(name, val)                                           \
({                                                                             \
        extern void** vcore_thread_control_blocks;                                 \
        typeof(val) __val = val;                                                   \
        begin_access_tls_vars(vcore_thread_control_blocks[vcoreid]);               \
        name = __val;                                                              \
        end_access_tls_vars();                                                     \
})

#define vcore_get_tls_var(name)                                                \
({                                                                             \
        typeof(name) val;                                                          \
        begin_access_tls_vars(vcore_tls_descs[vcoreid]);                           \
        val = name;                                                                \
        end_access_tls_vars();                                                     \
        val;                                                                       \
})

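/* Sketch: from uthread context, peek at (or update) the current vcore's copy
 * of a per-vcore __thread variable.  'nr_yields' is a hypothetical 2LS-defined
 * __thread counter living in each vcore's TLS; it is not declared here.
 *
 *    long yields = vcore_get_tls_var(nr_yields);
 *    vcore_set_tls_var(nr_yields, yields + 1);
 */
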
#ifdef __cplusplus
}
#endif

#endif /* _VCORE_H */