struct mcs_pdr_lock queue_lock;
int threads_ready = 0;
int threads_active = 0;
+atomic_t threads_total;
bool can_adjust_vcores = TRUE;
+bool need_tls = TRUE;
/* Array of per-vcore structs to manage waiting on syscalls and handling
* overflow. Init'd in pth_init(). */
void pth_thread_paused(struct uthread *uthread);
void pth_thread_blockon_sysc(struct uthread *uthread, void *sysc);
void pth_thread_has_blocked(struct uthread *uthread, int flags);
+void pth_thread_refl_fault(struct uthread *uthread, unsigned int trap_nr,
+ unsigned int err, unsigned long aux);
void pth_preempt_pending(void);
void pth_spawn_thread(uintptr_t pc_start, void *data);
pth_thread_paused,
pth_thread_blockon_sysc,
pth_thread_has_blocked,
+ pth_thread_refl_fault,
0, /* pth_preempt_pending, */
0, /* pth_spawn_thread, */
};
{
struct syscall *sysc = (struct syscall*)syscall;
int old_flags;
- bool need_to_restart = FALSE;
uint32_t vcoreid = vcore_id();
/* rip from the active queue */
struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
printf("For great justice!\n");
}
+/* 2LS op: handle a fault the kernel reflected back to userspace for
+ * @uthread.  @trap_nr/@err/@aux describe the fault.  Only page faults
+ * (trap 14 on x86) on VMR-backed regions are handled, by asking the kernel
+ * asynchronously to populate the faulting address; any other fault is fatal
+ * for the process.  Fix vs. original: the duplicate second assignment of
+ * pthread->state = PTH_BLK_SYSC (it was set both before taking the queue
+ * lock and again after the VMR check) has been removed - one is enough. */
+void pth_thread_refl_fault(struct uthread *uthread, unsigned int trap_nr,
+                           unsigned int err, unsigned long aux)
+{
+	struct pthread_tcb *pthread = (struct pthread_tcb*)uthread;
+	/* Treat the faulting thread like one blocked on a syscall: mark it and
+	 * pull it off the active queue.  It gets restarted when the
+	 * populate_va syscall completes (via the sysc event handler). */
+	pthread->state = PTH_BLK_SYSC;
+	mcs_pdr_lock(&queue_lock);
+	threads_active--;
+	TAILQ_REMOVE(&active_queue, pthread, next);
+	mcs_pdr_unlock(&queue_lock);
+
+	/* 14 == #PF (page fault vector) on x86; everything else is unhandled */
+	if (trap_nr != 14) {
+		printf("Pthread has unhandled fault\n");
+		print_user_context(&uthread->u_ctx);
+		exit(-1);
+	}
+
+	if (!(err & PF_VMR_BACKED)) {
+		/* TODO: put your SIGSEGV handling here */
+		printf("Pthread page faulted outside a VMR\n");
+		print_user_context(&uthread->u_ctx);
+		exit(-1);
+	}
+	/* stitching for the event handler. sysc -> uth, uth -> sysc */
+	uthread->local_sysc.u_data = uthread;
+	uthread->sysc = &uthread->local_sysc;
+	/* one downside is that we'll never check the return val of the syscall. if
+	 * we errored out, we wouldn't know til we PF'd again, and inspected the old
+	 * retval/err and other sysc fields (make sure the PF is on the same addr,
+	 * etc). could run into this issue on truncated files too. */
+	syscall_async(&uthread->local_sysc, SYS_populate_va, aux, 1);
+	if (!register_evq(&uthread->local_sysc, sysc_mgmt[vcore_id()].ev_q)) {
+		/* Lost the race with the call being done. The kernel won't send the
+		 * event. Just restart him. */
+		restart_thread(&uthread->local_sysc);
+	}
+}
+
/* 2LS op: would be called when a preemption is pending on our vcore.
 * Currently an empty stub - note the corresponding slot in the sched ops
 * table above is 0'd out, so this is not actually wired up yet. */
void pth_preempt_pending(void)
{
}
can_adjust_vcores = can;
}
+/* Lets the application toggle whether newly created pthreads get their own
+ * TLS region.  Consumed by pthread_create(), which copies the flag into
+ * uth_attr.want_tls before uthread_init().  Affects only threads created
+ * after this call; not synchronized - call before spawning threads. */
+void pthread_need_tls(bool need)
+{
+	need_tls = need;
+}
+
/* Pthread interface stuff and helpers */
int pthread_attr_init(pthread_attr_t *a)
attr->stacksize = stacksize;
return 0;
}
+
int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
*stacksize = attr->stacksize;
t->joiner = 0;
assert(t->id == 0);
/* Put the new pthread (thread0) on the active queue */
- mcs_pdr_lock(&queue_lock); /* arguably, we don't need these (_S mode) */
+ mcs_pdr_lock(&queue_lock);
threads_active++;
TAILQ_INSERT_TAIL(&active_queue, t, next);
mcs_pdr_unlock(&queue_lock);
* have its init stuff use things like vcore stacks or TLSs, we'll need to
* change this. */
uthread_lib_init((struct uthread*)t);
+ atomic_init(&threads_total, 1); /* one for thread0 */
}
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg)
{
+ struct uth_thread_attr uth_attr = {0};
run_once(pthread_lib_init());
/* Create the actual thread */
struct pthread_tcb *pthread;
/* Set the u_tf to start up in __pthread_run, which will call the real
* start_routine and pass it the arg. Note those aren't set until later in
* pthread_create(). */
- init_user_ctx(&pthread->uthread.u_ctx, (long)&__pthread_run,
- (long)(pthread->stacktop));
+ init_user_ctx(&pthread->uthread.u_ctx, (uintptr_t)&__pthread_run,
+ (uintptr_t)(pthread->stacktop));
pthread->start_routine = start_routine;
pthread->arg = arg;
/* Initialize the uthread */
- uthread_init((struct uthread*)pthread);
+ if (need_tls)
+ uth_attr.want_tls = TRUE;
+ uthread_init((struct uthread*)pthread, &uth_attr);
pth_thread_runnable((struct uthread*)pthread);
*thread = pthread;
+ atomic_inc(&threads_total);
return 0;
}
pth_thread_runnable((struct uthread*)temp_pth);
}
}
+ /* If we were the last pthread, we exit for the whole process. Keep in mind
+ * that thread0 is counted in this, so this will only happen if that thread
+ * calls pthread_exit(). */
+ if ((atomic_fetch_and_add(&threads_total, -1) == 1))
+ exit(0);
}
void pthread_exit(void *ret)
{
struct pthread_tcb *pthread = pthread_self();
+ /* Some apps could call pthread_exit before initing. This will slow down
+ * our pthread exits slightly. */
+ pthread_lib_init();
pthread->retval = ret;
destroy_dtls();
uthread_yield(FALSE, __pth_exit_cb, 0);