akaros/user/parlib/signal.c
/* Copyright (c) 2013 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * Kevin Klues <klueska@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * POSIX signal handling glue.  All glibc programs link against parlib, so they
 * will get this mixed in.  Mostly just registration of signal handlers.
 *
 * POSIX signal handling caveats:
 * - We don't copy signal handling tables or anything across forks or execs
 * - We don't send meaningful info in the siginfos, nor do we pass pid/uids on
 * signals coming from a kill.  This is especially pertinent for sigqueue,
 * which needs a payload (value) and sending PID
 * - We run handlers in vcore context, so any blocking syscall will spin.
 * Regular signals have restrictions on their syscalls too, though not as
 * severe.  We could spawn off a uthread to run the handler, given that we have
 * a 2LS (which we don't for SCPs).
 * - We don't do anything with signal blocking/masking.  When in a signal
 * handler, you won't get interrupted with another signal handler (so long as
 * you run it in vcore context!).  With uthreads, you could get interrupted.
 * There is also no process wide signal blocking yet (sigprocmask()).  If this
 * is desired, we can abort certain signals when we h_p_signal().
 * - Likewise, we don't do waiting for particular signals yet.  Just about the
 * only thing we do is allow the registration of signal handlers.
 * - Check each function for further notes.  */

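/* For orientation: applications register handlers through the usual glibc
 * calls (signal()/sigaction()); process-wide signals then reach those handlers
 * via handle_event() below.  A minimal sketch of the application side
 * (hypothetical code, not part of this file):
 *
 *        static void on_usr1(int signo)
 *        {
 *                // Process-wide signals run this in vcore context: avoid
 *                // blocking syscalls here, they will spin (see caveats above).
 *        }
 *
 *        static void setup(void)
 *        {
 *                struct sigaction sa = {.sa_handler = on_usr1};
 *
 *                sigaction(SIGUSR1, &sa, NULL);
 *        }
 */
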
// Needed for sigmask functions...
#define _GNU_SOURCE

#include <parlib/parlib.h>
#include <parlib/signal.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/ros_debug.h>
#include <errno.h>
#include <stdlib.h>
#include <parlib/assert.h>
#include <ros/procinfo.h>
#include <ros/syscall.h>
#include <sys/mman.h>
#include <parlib/stdio.h>

/* Forward declare our signal_ops functions. */
static int __sigaltstack(__const struct sigaltstack *__restrict __ss,
                         struct sigaltstack *__restrict __oss);
static int __siginterrupt(int __sig, int __interrupt);
static int __sigpending(sigset_t *__set);
static int __sigprocmask(int __how, __const sigset_t *__restrict __set,
                         sigset_t *__restrict __oset);
static int __sigqueue(__pid_t __pid, int __sig, __const union sigval __val);
static int __sigreturn(struct sigcontext *__scp);
static int __sigstack(struct sigstack *__ss, struct sigstack *__oss);
static int __sigsuspend(__const sigset_t *__set);
static int __sigtimedwait(__const sigset_t *__restrict __set,
                          siginfo_t *__restrict __info,
                          __const struct timespec *__restrict __timeout);
static int __sigwait(__const sigset_t *__restrict __set, int *__restrict __sig);
static int __sigwaitinfo(__const sigset_t *__restrict __set,
                         siginfo_t *__restrict __info);
static int __sigself(int signo);

/* The default definition of signal_ops (similar to sched_ops in uthread.c) */
struct signal_ops default_signal_ops = {
        .sigaltstack = __sigaltstack,
        .siginterrupt = __siginterrupt,
        .sigpending = __sigpending,
        .sigprocmask = __sigprocmask,
        .sigqueue = __sigqueue,
        .sigreturn = __sigreturn,
        .sigstack = __sigstack,
        .sigsuspend = __sigsuspend,
        .sigtimedwait = __sigtimedwait,
        .sigwait = __sigwait,
        .sigwaitinfo = __sigwaitinfo,
        .sigself = __sigself
};

/* This is the catch all akaros event->posix signal handler.  All posix signals
 * are received in a single akaros event type.  They are then dispatched from
 * this function to their proper posix signal handler */
static void handle_event(struct event_msg *ev_msg, unsigned int ev_type,
                         void *data)
{
        int sig_nr;
        struct siginfo info = {0};
        info.si_code = SI_USER;

        assert(ev_msg);
        sig_nr = ev_msg->ev_arg1;
        /* These POSIX signals are process-wide, but legacy applications and
         * their signal handlers often expect the signals to be routed to
         * particular threads.  This manifests in a couple ways: the signal
         * handlers expect a user context, and the program expects syscalls to
         * be interrupted.  Which context?  Which syscall?
         *
         * On Akaros, signals only go to the process, since there is no kernel
         * notion of a thread/task within a process.  All knowledge of
         * threads and how to resolve this mismatch between process-wide signals
         * and threads is held in the 2LS.  If we wanted to abort a syscall,
         * we'd need to know which one - after all, on Akaros syscalls are
         * asynchronous and it is only in the 2LS that they are coupled to
         * uthreads.  When it comes to routing the signal, the 2LS could do
         * something like pthread_kill, or just execute the handler. */
        sched_ops->got_posix_signal(sig_nr, &info);
}

/* Called from uthread_slim_init() */
void init_posix_signals(void)
{
        struct event_queue *posix_sig_ev_q;

        signal_ops = &default_signal_ops;
        register_ev_handler(EV_POSIX_SIGNAL, handle_event, 0);
        posix_sig_ev_q = get_eventq(EV_MBOX_UCQ);
        assert(posix_sig_ev_q);
        posix_sig_ev_q->ev_flags = EVENT_IPI | EVENT_INDIR | EVENT_SPAM_INDIR |
                                   EVENT_WAKEUP;
        register_kevent_q(posix_sig_ev_q, EV_POSIX_SIGNAL);
}
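
/* With the handler and event queue registered above, delivery of a
 * process-wide signal roughly goes: the kernel posts an EV_POSIX_SIGNAL event
 * (e.g. in response to a kill) to posix_sig_ev_q, a vcore wakes up to handle
 * it, handle_event() runs in vcore context, and the 2LS's got_posix_signal()
 * op decides how and where the registered handler actually runs. */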

/* Swap the contents of two user contexts (not just their pointers). */
static void swap_user_contexts(struct user_context *c1, struct user_context *c2)
{
        struct user_context temp_ctx;

        temp_ctx = *c1;
        *c1 = *c2;
        *c2 = temp_ctx;
}

/* Helper for checking a stack pointer.  It's possible the context we're
 * injecting signals into is complete garbage, so using the SP is a little
 * dangerous. */
static bool stack_ptr_is_sane(uintptr_t sp)
{
        if ((sp < PGSIZE) || (sp > ULIM))
                return FALSE;
        return TRUE;
}

static bool uth_is_handling_sigs(struct uthread *uth)
{
        return uth->sigstate.data ? TRUE : FALSE;
}

/* Prep a uthread to run a signal handler.  The original context of the uthread
 * is saved on its stack, and a new context is set up to run the signal handler
 * the next time the uthread is run. */
static void __prep_sighandler(struct uthread *uthread,
                              void (*entry)(void),
                              struct siginfo *info)
{
        uintptr_t stack;
        struct user_context *ctx;

        if (uthread->flags & UTHREAD_SAVED) {
                ctx = &uthread->u_ctx;
        } else {
                assert(current_uthread == uthread);
                ctx = &vcpd_of(vcore_id())->uthread_ctx;
        }
        stack = get_user_ctx_sp(ctx) - sizeof(struct sigdata);
        stack = ROUNDDOWN(stack, __alignof__(struct sigdata));
        assert(stack_ptr_is_sane(stack));
        uthread->sigstate.data = (struct sigdata*)stack;
        /* Parlib aggressively saves the FP state for HW and VM ctxs.  SW ctxs
         * should not have FP state saved. */
        switch (ctx->type) {
        case ROS_HW_CTX:
        case ROS_VM_CTX:
                assert(uthread->flags & UTHREAD_FPSAVED);
                /* We need to save the already-saved FP state into the sigstate
                 * space.  The sig handler is taking over the uthread and its GP
                 * and FP spaces.
                 *
                 * If we ever go back to not aggressively saving the FP state,
                 * then for HW and VM ctxs, the state is in hardware.
                 * Regardless, we still need to save it in ->as, with something
                 * like: save_fp_state(&uthread->sigstate.data->as);
                 *
                 * Either way, when we're done with this entire function, the
                 * *uthread* will have ~UTHREAD_FPSAVED, since we will be
                 * talking about the SW context that is running the signal
                 * handler. */
                uthread->sigstate.data->as = uthread->as;
                uthread->flags &= ~UTHREAD_FPSAVED;
                break;
        case ROS_SW_CTX:
                assert(!(uthread->flags & UTHREAD_FPSAVED));
                break;
        };
        if (info != NULL)
                uthread->sigstate.data->info = *info;

        if (uthread->sigstate.sigalt_stacktop != 0)
                stack = uthread->sigstate.sigalt_stacktop;

        init_user_ctx(&uthread->sigstate.data->u_ctx, (uintptr_t)entry, stack);
        /* The uthread may or may not be UTHREAD_SAVED.  That depends on whether
         * the uthread was in that state initially.  We're swapping into the
         * location of 'ctx', which is either in VCPD or the uth itself. */
        swap_user_contexts(ctx, &uthread->sigstate.data->u_ctx);
}
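
/* Note: after __prep_sighandler(), the next time this uthread runs it starts
 * in 'entry' (on the registered alternate stack, if any, otherwise just below
 * the carved-out struct sigdata), while the original context sits in
 * sigstate.data->u_ctx until __restore_after_sighandler() puts it back. */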

/* Restore the context saved as the result of running a signal handler on a
 * uthread. This context will execute the next time the uthread is run. */
static void __restore_after_sighandler(struct uthread *uthread)
{
        uthread->u_ctx = uthread->sigstate.data->u_ctx;
        uthread->flags |= UTHREAD_SAVED;
        switch (uthread->u_ctx.type) {
        case ROS_HW_CTX:
        case ROS_VM_CTX:
                uthread->as = uthread->sigstate.data->as;
                uthread->flags |= UTHREAD_FPSAVED;
                break;
        }
        uthread->sigstate.data = NULL;
}

/* Callback when yielding a uthread upon completion of a sighandler.  We didn't
 * save the current context on yield, but that's ok because here we restore the
 * original saved context of the uthread and then treat this like a normal
 * voluntary yield. */
static void __exit_sighandler_cb(struct uthread *uthread, void *junk)
{
        __restore_after_sighandler(uthread);
        uthread_paused(uthread);
}

/* Run a specific sighandler from the top of the sigstate stack. The 'info'
 * struct is prepopulated before the call is triggered as the result of a
 * reflected fault. */
static void __run_sighandler(void)
{
        struct uthread *uthread = current_uthread;
        int signo = uthread->sigstate.data->info.si_signo;

        __sigdelset(&uthread->sigstate.pending, signo);
        trigger_posix_signal(signo, &uthread->sigstate.data->info,
                             &uthread->sigstate.data->u_ctx);
        uthread_yield(FALSE, __exit_sighandler_cb, 0);
}

/* Run through all pending sighandlers and trigger them with a NULL info
 * field. These handlers are triggered as the result of thread directed
 * signals (i.e. not interprocess signals), and thus don't require individual
 * 'info' structs. */
static void __run_all_sighandlers(void)
{
        struct uthread *uthread = current_uthread;
        sigset_t andset = uthread->sigstate.pending & (~uthread->sigstate.mask);

        for (int i = 1; i < _NSIG; i++) {
                if (__sigismember(&andset, i)) {
                        __sigdelset(&uthread->sigstate.pending, i);
                        trigger_posix_signal(i, NULL,
                                             &uthread->sigstate.data->u_ctx);
                }
        }
        uthread_yield(FALSE, __exit_sighandler_cb, 0);
}

int uthread_signal(struct uthread *uthread, int signo)
{
        // Slightly racy with clearing of mask when triggering the signal, but
        // that's OK, as signals are inherently racy since they don't queue up.
        return sigaddset(&uthread->sigstate.pending, signo);
}
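
/* A 2LS delivers thread-directed signals by marking them pending here and
 * letting the target uthread run its handlers the next time it is scheduled.
 * A rough sketch of a pthread_kill()-style wrapper (hypothetical 2LS code, not
 * part of this file):
 *
 *        int my_thread_kill(struct uthread *target, int signo)
 *        {
 *                int ret = uthread_signal(target, signo);
 *
 *                // The 2LS is expected to call
 *                // uthread_prep_pending_signals(target) before running the
 *                // target, so it pops into __run_all_sighandlers() instead
 *                // of its saved context.
 *                return ret;
 *        }
 */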

/* If there are any pending signals, prep the uthread to run its signal
 * handler. The next time the uthread is run, it will pop into its signal
 * handler context instead of its original saved context. Once the signal
 * handler is complete, the original context will be restored and restarted. */
void uthread_prep_pending_signals(struct uthread *uthread)
{
        if (!uth_is_handling_sigs(uthread) && uthread->sigstate.pending) {
                sigset_t andset = uthread->sigstate.pending & (~uthread->sigstate.mask);

                if (!__sigisemptyset(&andset))
                        __prep_sighandler(uthread, __run_all_sighandlers, NULL);
        }
}

/* If the given signal is unmasked, prep the uthread to run its signal
 * handler, but don't run it yet. In either case, make the uthread runnable
 * again. Once the signal handler is complete, the original context will be
 * restored and restarted. */
void uthread_prep_signal_from_fault(struct uthread *uthread,
                                    int signo, int code, void *addr)
{
        if (!__sigismember(&uthread->sigstate.mask, signo)) {
                struct siginfo info = {0};

                if (uth_is_handling_sigs(uthread)) {
                        printf("Uthread sighandler faulted, signal: %d\n",
                               signo);
                        /* uthread.c already copied out the faulting ctx into
                         * the uth */
                        print_user_context(&uthread->u_ctx);
                        exit(-1);
                }
                info.si_signo = signo;
                info.si_code = code;
                info.si_addr = addr;
                __prep_sighandler(uthread, __run_sighandler, &info);
        }
}

/* This is managed by vcore / 2LS code */
static int __sigaltstack(__const struct sigaltstack *__restrict __ss,
                         struct sigaltstack *__restrict __oss)
{
        if (__ss->ss_flags != 0) {
                errno = EINVAL;
                return -1;
        }
        if (__oss != NULL) {
                errno = EINVAL;
                return -1;
        }
        if (__ss->ss_size < MINSIGSTKSZ) {
                errno = ENOMEM;
                return -1;
        }
        uintptr_t stack_top = (uintptr_t) __ss->ss_sp + __ss->ss_size;

        current_uthread->sigstate.sigalt_stacktop = stack_top;
        return 0;
}
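
/* Hypothetical caller-side usage (the standard POSIX call, routed here through
 * signal_ops; note we only accept ss_flags == 0 and no old-stack query):
 *
 *        stack_t ss = {
 *                .ss_sp = malloc(SIGSTKSZ),
 *                .ss_size = SIGSTKSZ,
 *                .ss_flags = 0,
 *        };
 *
 *        if (sigaltstack(&ss, NULL))
 *                perror("sigaltstack");
 */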

/* On Akaros, signals don't interrupt syscalls in a way that requires a
 * restart, though we can re-wake the process while it is waiting on its
 * syscall. */
static int __siginterrupt(int __sig, int __interrupt)
{
        return 0;
}

/* Not really possible or relevant - you'd need to walk/examine the event UCQ */
static int __sigpending(sigset_t *__set)
{
        return 0;
}

static int __sigprocmask(int __how, __const sigset_t *__restrict __set,
                         sigset_t *__restrict __oset)
{
        sigset_t *sigmask;

        /* Signal handlers might call sigprocmask, with the intent of affecting
         * the uthread's sigmask.  Process-wide signal handlers run on behalf of
         * the entire process and aren't bound to a uthread, which means
         * sigprocmask won't work.  We can tell we're running one of these
         * handlers since we are in vcore context.  Uthread signals (e.g.
         * pthread_kill()) run from uthread context. */
        if (in_vcore_context()) {
                errno = ENOENT;
                return -1;
        }

        sigmask = &current_uthread->sigstate.mask;

        if (__set && (__how != SIG_BLOCK) &&
                     (__how != SIG_SETMASK) &&
                     (__how != SIG_UNBLOCK)) {
                errno = EINVAL;
                return -1;
        }

        if (__oset)
                *__oset = *sigmask;
        if (__set) {
                switch (__how) {
                        case SIG_BLOCK:
                                *sigmask = *sigmask | *__set;
                                break;
                        case SIG_SETMASK:
                                *sigmask = *__set;
                                break;
                        case SIG_UNBLOCK:
                                *sigmask = *sigmask & ~(*__set);
                                break;
                }
        }
        return 0;
}
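
/* Hypothetical per-uthread usage (only from uthread context; process-wide
 * handlers run in vcore context and will get ENOENT here):
 *
 *        sigset_t set;
 *
 *        sigemptyset(&set);
 *        sigaddset(&set, SIGUSR1);
 *        if (sigprocmask(SIG_BLOCK, &set, NULL))
 *                perror("sigprocmask");
 */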

/* Needs support in trigger_posix_signal to deal with passing values along with
 * POSIX signals. */
static int __sigqueue(__pid_t __pid, int __sig, __const union sigval __val)
{
        return 0;
}

/* Linux specific, and not really needed for us */
static int __sigreturn(struct sigcontext *__scp)
{
        return 0;
}

/* This is managed by vcore / 2LS code */
static int __sigstack(struct sigstack *__ss, struct sigstack *__oss)
{
        return 0;
}

/* Could do this with a loop on delivery of the signal, sleeping and getting
 * woken up by the kernel on any event, like we do with async syscalls. */
static int __sigsuspend(__const sigset_t *__set)
{
        return 0;
}

/* Can be done similar to sigsuspend, with an extra alarm syscall */
static int __sigtimedwait(__const sigset_t *__restrict __set,
                          siginfo_t *__restrict __info,
                          __const struct timespec *__restrict __timeout)
{
        return 0;
}

/* Can be done similar to sigsuspend */
static int __sigwait(__const sigset_t *__restrict __set, int *__restrict __sig)
{
        return 0;
}

/* Can be done similar to sigsuspend */
static int __sigwaitinfo(__const sigset_t *__restrict __set,
                         siginfo_t *__restrict __info)
{
        return 0;
}

static int __sigself(int signo)
{
        int ret;

        if (in_vcore_context())
                return kill(getpid(), signo);

        ret = uthread_signal(current_uthread, signo);

        void cb(struct uthread *uthread, void *arg)
        {
                uthread_paused(uthread);
        }
        if (ret == 0)
                uthread_yield(TRUE, cb, 0);
        return ret;
}