akaros/kern/src/event.c
/* Copyright (c) 2011 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel utility functions for sending events and notifications (IPIs) to
 * processes. */

#include <ucq.h>
#include <ceq.h>
#include <bitmask.h>
#include <event.h>
#include <atomic.h>
#include <process.h>
#include <smp.h>
#include <umem.h>
#include <stdio.h>
#include <assert.h>
#include <pmap.h>
#include <schedule.h>

/* Note these three helpers return the user address of the mbox, not the KVA.
 * Load current to access this, and it will work for any process. */
static struct event_mbox *get_vcpd_mbox_priv(uint32_t vcoreid)
{
        return &__procdata.vcore_preempt_data[vcoreid].ev_mbox_private;
}

static struct event_mbox *get_vcpd_mbox_pub(uint32_t vcoreid)
{
        return &__procdata.vcore_preempt_data[vcoreid].ev_mbox_public;
}

static struct event_mbox *get_vcpd_mbox(uint32_t vcoreid, int ev_flags)
{
        if (ev_flags & EVENT_VCORE_PRIVATE)
                return get_vcpd_mbox_priv(vcoreid);
        else
                return get_vcpd_mbox_pub(vcoreid);
}

/* Can we message the vcore?  (Will it check its messages).  Note this checks
 * procdata via the user pointer. */
static bool can_msg_vcore(uint32_t vcoreid)
{
        struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
        return atomic_read(&vcpd->flags) & VC_CAN_RCV_MSG;
}

/* Says a vcore can be messaged.  Only call this once you are sure this is true
 * (holding the proc_lock, etc). */
static void set_vcore_msgable(uint32_t vcoreid)
{
        struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
        atomic_or(&vcpd->flags, VC_CAN_RCV_MSG);
}

static void send_evbitmap_msg(struct evbitmap *evbm, struct event_msg *msg)
{
        SET_BITMASK_BIT_ATOMIC(evbm->bitmap, msg->ev_type);
        wmb(); /* ensure the bit is set before advertising check_bits */
        evbm->check_bits = TRUE;
}

/* Posts a message to the mbox.  mbox is a pointer to user-accessible memory.
 * If mbox is a user-provided pointer, make sure that you've checked it.
 * Regardless, make sure you have that process's address space loaded. */
static void post_ev_msg(struct proc *p, struct event_mbox *mbox,
                        struct event_msg *msg, int ev_flags)
{
        printd("[kernel] Sending event type %d to mbox %p\n",
               msg->ev_type, mbox);
        /* Sanity check */
        assert(p);
        switch (mbox->type) {
        case (EV_MBOX_UCQ):
                send_ucq_msg(&mbox->ucq, p, msg);
                break;
        case (EV_MBOX_BITMAP):
                send_evbitmap_msg(&mbox->evbm, msg);
                break;
        case (EV_MBOX_CEQ):
                send_ceq_msg(&mbox->ceq, p, msg);
                break;
        default:
                printk("[kernel] Unknown mbox type %d!\n", mbox->type);
        }
}

/* Helper: use this when sending a message to a VCPD mbox.  It just posts to the
 * ev_mbox and sets notif pending.  Note this uses a userspace address for the
 * VCPD (though not a user's pointer). */
static void post_vc_msg(struct proc *p, uint32_t vcoreid,
                        struct event_mbox *ev_mbox, struct event_msg *ev_msg,
                        int ev_flags)
{
        struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
        post_ev_msg(p, ev_mbox, ev_msg, ev_flags);
        /* Set notif pending so userspace doesn't miss the message while
         * yielding */
        wmb(); /* Ensure ev_msg write is before notif_pending */
        /* proc_notify() also sets this, but the ev_q might not have requested
         * an IPI, so we have to do it here too. */
        vcpd->notif_pending = TRUE;
}

/* Helper: will IPI / proc_notify if the flags say so.  We also check to make
 * sure it is mapped (slight optimization) */
static void try_notify(struct proc *p, uint32_t vcoreid, int ev_flags)
{
        /* Note this is an unlocked-peek at the vcoremap */
        if ((ev_flags & EVENT_IPI) && vcore_is_mapped(p, vcoreid))
                proc_notify(p, vcoreid);
}

/* Helper: sends the message and an optional IPI to the vcore.  Sends to the
 * public mbox. */
static void spam_vcore(struct proc *p, uint32_t vcoreid,
                       struct event_msg *ev_msg, int ev_flags)
{
        post_vc_msg(p, vcoreid, get_vcpd_mbox_pub(vcoreid), ev_msg, ev_flags);
        try_notify(p, vcoreid, ev_flags);
}

/* Attempts to message a vcore that may or may not have VC_CAN_RCV_MSG set.  If
 * so, we'll post the message and the message will eventually get dealt with
 * (when the vcore runs or when it is preempt-recovered). */
static bool try_spam_vcore(struct proc *p, uint32_t vcoreid,
                           struct event_msg *ev_msg, int ev_flags)
{
        /* Not sure if we can or not, so check before spamming.  Technically,
         * the only critical part is that we spam_vcore(), then check
         * can_msg_vcore(). */
        if (can_msg_vcore(vcoreid)) {
                spam_vcore(p, vcoreid, ev_msg, ev_flags);
                /* prev write (notif_pending) must come before following reads*/
                wrmb();
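                /* If the vcore is still marked messageable after our post, it
                 * will see notif_pending and pick up the message; if the flag
                 * got cleared in the window, the vcore may have already
                 * stopped checking, so fail and let the caller pick another
                 * target. */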
                if (can_msg_vcore(vcoreid))
                        return TRUE;
        }
        return FALSE;
}

/* Helper: will try to message (INDIR/IPI) a list member (lists of vcores).  We
 * use this on the online and bulk_preempted vcore lists.  If this succeeds in
 * alerting a vcore on the list, it'll return TRUE.  We need to be careful here,
 * since we're reading a list that could be concurrently modified.  The
 * important thing is that we can always fail if we're unsure (such as with
 * lists being temporarily empty).  The caller will be able to deal with it via
 * the ultimate fallback. */
static bool spam_list_member(struct vcore_tailq *list, struct proc *p,
                             struct event_msg *ev_msg, int ev_flags)
{
        struct vcore *vc, *vc_first;
        uint32_t vcoreid;
        int loops = 0;
        vc = TAILQ_FIRST(list);
        /* If the list appears empty, we'll bail out (failing) after the loop.
         */
        while (vc) {
                vcoreid = vcore2vcoreid(p, vc);
                /* post the alert.  Not using the try_spam_vcore() helper since
                 * I want something more customized for the lists. */
                spam_vcore(p, vcoreid, ev_msg, ev_flags);
                /* prev write (notif_pending) must come before following reads*/
                wrmb();
                /* I used to check can_msg_vcore(vcoreid) here, but that would
                 * make spamming list members unusable for MUST_RUN scenarios.
                 *
                 * Regardless, if they are still the first on the list, then
                 * they are still going to get the message.  For the online
                 * list, proc_yield() will return them to userspace (where they
                 * will get the message) because spam_vcore() set
                 * notif_pending.  For the BP list, they will either be turned
                 * on later, or have a preempt message sent about their demise.
                 *
                 * We race on list membership (and not exclusively on
                 * VC_CAN_RCV_MSG), so that when it fails we can get a new
                 * vcore to try (or know with high probability that there are
                 * none). */
                vc_first = TAILQ_FIRST(list);
                if (vc == vc_first)
                        return TRUE;
                /* At this point, the list has changed and the vcore we tried
                 * yielded, so we try the *new* list head.  Track loops for
                 * sanity reasons. */
                if (loops++ > 10) {
                        warn("Too many (%d) attempts to find a vcore, failing!",
                             loops);
                        return FALSE;   /* always safe to fail! */
                }
                /* Get set up for your attack run! */
                vc = vc_first;
        }
        return FALSE;
}

/* This makes sure ev_msg is sent to some vcore, preferring vcoreid.
 *
 * One of the goals of SPAM_INDIR (and this func) is to allow processes to yield
 * cores without fear of losing messages.  Even when yielding and getting
 * preempted, if your message is spammed, it will get to some vcore.  If
 * MUST_RUN is set, it'll get to a running vcore.  Messages that you send like
 * this must be able to handle spurious reads, since more than one vcore is
 * likely to get the message and handle it.
 *
 * We try the desired vcore, using VC_CAN_RCV_MSG.  Failing that, we'll search
 * the online and then the bulk_preempted lists.  These lists serve as a way to
 * find likely messageable vcores.  spam_list_member() helps us with them,
 * failing if anything seems to go wrong, at which point we just lock and try
 * to deal with things.  In that scenario, we most likely would need the lock
 * anyway to wake up the process (if it was WAITING).
 *
 * One tricky thing with sending to the bulk_preempt list is that we may want to
 * send a message about a (bulk) preemption to someone on that list.  This works
 * since a given vcore that was preempted will be removed from that list before
 * we try to send_event() (in theory, there isn't code that can send that event
 * yet).  Someone else will get the event and wake up the preempted vcore. */
static void spam_public_msg(struct proc *p, struct event_msg *ev_msg,
                            uint32_t vcoreid, int ev_flags)
{
        struct vcore *vc;
        if (!__proc_is_mcp(p)) {
                spam_vcore(p, 0, ev_msg, ev_flags);
                return;
        }
        if (ev_flags & EVENT_VCORE_MUST_RUN) {
                /* Could check for waiting and skip these spams, which will
                 * fail.  Could also skip trying for vcoreid, and just spam any
                 * old online VC. */
                if (vcore_is_mapped(p, vcoreid)) {
                        /* check, signal, check again */
                        spam_vcore(p, vcoreid, ev_msg, ev_flags);
                        /* notif_pending write must come before following read
                         */
                        wrmb();
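                        /* Still mapped after the post: the running vcore will
                         * see notif_pending / the message.  If it got unmapped
                         * (preempted) in the window, fall through and spam the
                         * online list instead. */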
                        if (vcore_is_mapped(p, vcoreid))
                                return;
                }
                if (spam_list_member(&p->online_vcs, p, ev_msg, ev_flags))
                        return;
                goto ultimate_fallback;
        }
        /* First, try posting to the desired vcore */
        if (try_spam_vcore(p, vcoreid, ev_msg, ev_flags))
                return;
        /* If the process is WAITING, let's just jump to the fallback */
        if (p->state == PROC_WAITING)
                goto ultimate_fallback;
        /* If we're here, the desired vcore is unreachable, but the process is
         * probably RUNNING_M (online_vcs) or RUNNABLE_M (bulk preempted or
         * recently woken up), so we'll need to find another vcore. */
        if (spam_list_member(&p->online_vcs, p, ev_msg, ev_flags))
                return;
        if (spam_list_member(&p->bulk_preempted_vcs, p, ev_msg, ev_flags))
                return;
        /* Last chance, let's check the head of the inactives.  It might be
         * alertable (the kernel set it earlier due to an event, or it was a
         * bulk_preempt that didn't restart), and we can avoid grabbing the
         * proc_lock. */
        vc = TAILQ_FIRST(&p->inactive_vcs);
        if (vc) {       /* might be none in rare circumstances */
                if (try_spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags)) {
                        /* It's possible that we're WAITING here.  EVENT_WAKEUP
                         * will handle it.  One way for this to happen is if a
                         * normal vcore was preempted right as another vcore was
                         * yielding, and the preempted message was sent after
                         * the last vcore yielded (which caused us to be
                         * WAITING). */
                        return;
                }
        }
ultimate_fallback:
        /* At this point, we can't find one.  This could be due to a (hopefully
         * rare) weird yield/request storm, or more commonly because the lists
         * were empty and the process is simply WAITING (yielded all of its
         * vcores and is waiting on an event).  Time for the ultimate fallback:
         * locking.  Note that when we spam_vcore(), there is a chance we need
         * to mmap, which grabs the vmr_lock and pte_lock. */
        spin_lock(&p->proc_lock);
        if (p->state != PROC_WAITING) {
                /* We need to check the online and bulk_preempt lists again, now
                 * that we are sure no one is messing with them.  If we're
                 * WAITING, we can skip these (or assert they are empty!). */
                vc = TAILQ_FIRST(&p->online_vcs);
                if (vc) {
                        /* there's an online vcore, so just alert it (we know it
                         * isn't going anywhere), and return */
                        spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
                        spin_unlock(&p->proc_lock);
                        return;
                }
                vc = TAILQ_FIRST(&p->bulk_preempted_vcs);
                if (vc) {
                        /* the process is bulk preempted, similar deal to above
                         */
                        spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
                        spin_unlock(&p->proc_lock);
                        return;
                }
        }
        /* At this point, we're sure all vcores are yielded, though we might not
         * be WAITING.  Post to the first on the inactive list (which is the one
         * that will definitely be woken up) */
        vc = TAILQ_FIRST(&p->inactive_vcs);
        assert(vc);
        spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags);
        /* Set the vcore's alertable flag, to short circuit our last ditch
         * effort above */
        set_vcore_msgable(vcore2vcoreid(p, vc));
        /* The first event to catch the process with no online/bp vcores will
         * need to wake it up, which is handled elsewhere if they requested
         * EVENT_WAKEUP.  We could be RUNNABLE_M here if another event already
         * woke us and we didn't get lucky with the penultimate fallback. */
        spin_unlock(&p->proc_lock);
}

/* Helper: sends an indirection event for an ev_q, preferring vcoreid */
static void send_indir(struct proc *p, struct event_queue *ev_q,
                       uint32_t vcoreid)
{
        struct event_msg local_msg = {0};
        /* If an alert is already pending and they don't want repeats, just
         * return.  One of the few uses of NOTHROTTLE will be for preempt_msg
         * ev_qs.  Ex: an INDIR was already sent to the preempted vcore, then
         * alert throttling would stop another vcore from getting the message
         * about the original vcore. */
        if (!(ev_q->ev_flags & EVENT_NOTHROTTLE) && (ev_q->ev_alert_pending))
                return;
        /* We'll eventually get an INDIR through, so don't send any more til
         * userspace toggles this.  Regardless of other writers to this flag, we
         * eventually send an alert that causes userspace to turn throttling off
         * again (before handling all of the ev_q's events).
         *
         * This will also squelch IPIs, since there's no reason to send the IPI
         * if the INDIR is still un-acknowledged.  The vcore is either in vcore
         * context, attempting to deal with the INDIR, or offline.  This
         * statement is probably true. */
        ev_q->ev_alert_pending = TRUE;
        wmb();  /* force this write to happen before any event writes */
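        /* The INDIR itself is just an EV_EVENT message whose ev_arg3 carries
         * the user's ev_q pointer; the vcore that receives it will go process
         * the real ev_q/mbox. */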
        local_msg.ev_type = EV_EVENT;
        local_msg.ev_arg3 = ev_q;
        /* If we're not spamming indirs, just send and be done with it.
         *
         * It's possible that the user does not want to poll their evq and wants
         * an INDIR, but also doesn't care about sleeping or otherwise not
         * getting the message right away.  The INDIR could sit in the VCPD of a
         * vcore that doesn't run for a while.  Perhaps if the app always made
         * sure VC 0 was on when it was running at all, and sent the INDIR
         * there.  Or there was a per-vc evq that only needed to be handled when
         * the VC turned on.  This gets at another aspect of INDIRs, other than
         * its need for "only once" operation: maybe the mbox type isn't a UCQ
         * (like the VCPD mboxes). */
        if (!(ev_q->ev_flags & EVENT_SPAM_INDIR)) {
                spam_vcore(p, vcoreid, &local_msg, ev_q->ev_flags);
                return;
        }
        /* At this point, we actually want to send and spam an INDIR.
         * This will guarantee the message makes it to some vcore. */
        spam_public_msg(p, &local_msg, vcoreid, ev_q->ev_flags);
}

/* Send an event to ev_q, based on the parameters in ev_q's flag.  We don't
 * accept null ev_qs, since the caller ought to be checking before bothering to
 * make a msg and send it to the event_q.  Vcoreid is who the kernel thinks the
 * message ought to go to (for IPIs).  Appropriate for things like
 * EV_PREEMPT_PENDING, where we tell the affected vcore.  To have the message go
 * where the kernel suggests, set EVENT_VCORE_APPRO(priate). */
void send_event(struct proc *p, struct event_queue *ev_q, struct event_msg *msg,
                uint32_t vcoreid)
{
        uintptr_t old_proc;
        struct event_mbox *ev_mbox = 0;

        assert(!in_irq_ctx(&per_cpu_info[core_id()]));
        assert(p);
        if (proc_is_dying(p))
                return;
        printd("[kernel] sending msg to proc %p, ev_q %p\n", p, ev_q);
        assert(is_user_rwaddr(ev_q, sizeof(struct event_queue)));
        /* ev_q is a user pointer, so we need to make sure we're in the right
         * address space */
        old_proc = switch_to(p);
        /* Get the vcoreid that we'll message (if appropriate).  For INDIR and
         * SPAMMING, this is the first choice of a vcore, but other vcores might
         * get it.  Common case is !APPRO and !ROUNDROBIN.  Note we are
         * clobbering the vcoreid parameter. */
        if (!(ev_q->ev_flags & EVENT_VCORE_APPRO))
                vcoreid = ev_q->ev_vcore;       /* use the ev_q's vcoreid */
        /* Note that RR overwrites APPRO */
        if (ev_q->ev_flags & EVENT_ROUNDROBIN) {
                /* Pick a vcore, round-robin style.  Assuming ev_vcore was the
                 * previous one used.  Note that round-robin overrides the
                 * passed-in vcoreid.  Also note this may be 'wrong' if
                 * num_vcores changes.  Also note that SCPs currently have 0
                 * vcores. */
                if (__proc_is_mcp(p)) {
                        vcoreid = (ev_q->ev_vcore + 1) %
                                  p->procinfo->num_vcores;
                } else {
                        vcoreid = 0;
                }
                ev_q->ev_vcore = vcoreid;
        }
        if (!proc_vcoreid_is_safe(p, vcoreid)) {
                /* Ought to kill them, just warn for now */
                printk("[kernel] Vcoreid %d unsafe! (too big?)\n", vcoreid);
                goto out;
        }
        /* If we're a SPAM_PUBLIC, they just want us to spam the message.  Note
         * we don't care about the mbox, since it'll go to VCPD public mboxes,
         * and we'll prefer to send it to whatever vcoreid we determined at this
         * point (via APPRO or whatever). */
        if (ev_q->ev_flags & EVENT_SPAM_PUBLIC) {
                spam_public_msg(p, msg, vcoreid, ev_q->ev_flags);
                goto wakeup;
        }
        /* We aren't spamming and we know the default vcore, and now we need to
         * figure out which mbox to use.  If they provided an mbox, we'll use
         * it.  If not, we'll use a VCPD mbox (public or private, depending on
         * the flags). */
        ev_mbox = ev_q->ev_mbox;
        if (!ev_mbox)
                ev_mbox = get_vcpd_mbox(vcoreid, ev_q->ev_flags);
        /* At this point, we ought to have the right mbox to send the msg to,
         * and which vcore to alert (IPI/INDIR) (if applicable).  The mbox could
         * be the vcore's vcpd ev_mbox. */
        if (!ev_mbox) {
                /* This shouldn't happen any more, this is more for sanity's
                 * sake */
                warn("[kernel] ought to have an mbox by now!");
                goto out;
        }
        /* Even if we're using an mbox in procdata (VCPD), we want a user
         * pointer */
        if (!is_user_rwaddr(ev_mbox, sizeof(struct event_mbox))) {
                /* Ought to kill them, just warn for now */
                printk("[kernel] Illegal addr for ev_mbox\n");
                goto out;
        }
        post_ev_msg(p, ev_mbox, msg, ev_q->ev_flags);
        wmb();  /* ensure ev_msg write is before alerting the vcore */
        /* Prod/alert a vcore with an IPI or INDIR, if desired.  INDIR will also
         * call try_notify (IPI) later */
        if (ev_q->ev_flags & EVENT_INDIR) {
                send_indir(p, ev_q, vcoreid);
        } else {
                /* they may want an IPI despite not wanting an INDIR */
                try_notify(p, vcoreid, ev_q->ev_flags);
        }
wakeup:
        if ((ev_q->ev_flags & EVENT_WAKEUP) && (p->state == PROC_WAITING))
                proc_wakeup(p);
        /* Fall through */
out:
        /* Return to the old address space. */
        switch_back(p, old_proc);
}

/* Send an event for the kernel event ev_num.  These are the "one sided" kernel
 * initiated events, that require a lookup of the ev_q in procdata.  This is
 * roughly equivalent to the old "proc_notify()" */
void send_kernel_event(struct proc *p, struct event_msg *msg, uint32_t vcoreid)
{
        uint16_t ev_num = msg->ev_type;
        assert(ev_num < MAX_NR_EVENT);          /* events start at 0 */
        struct event_queue *ev_q = p->procdata->kernel_evts[ev_num];
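        /* kernel_evts lives in procdata, so userspace can change or clear the
         * pointer at any time; all we need is one consistent snapshot and the
         * NULL check below before using it. */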
        /* Linux would put an rmb_depends() here too, I think. */
        if (ev_q)
                send_event(p, ev_q, msg, vcoreid);
}

/* Writes the msg to the vcpd mbox of the vcore.  If you want the private mbox,
 * send in the ev_flag EVENT_VCORE_PRIVATE.  If not, the message could
 * be received by other vcores if the given vcore is offline/preempted/etc.
 * Whatever other flags you pass in will get sent to post_ev_msg.  Currently,
 * the only one that will get looked at is NO_MSG (set a bit).
 *
 * This needs to load current (switch_to), but doesn't need to care about what
 * the process wants.  Note this isn't commonly used - just the monitor and
 * sys_self_notify(). */
void post_vcore_event(struct proc *p, struct event_msg *msg, uint32_t vcoreid,
                      int ev_flags)
{
        /* Need to set p as current to post the event */
        struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
        uintptr_t old_proc = switch_to(p);

        assert(proc_vcoreid_is_safe(p, vcoreid));
        /* *ev_mbox is the user address of the vcpd mbox */
        post_vc_msg(p, vcoreid, get_vcpd_mbox(vcoreid, ev_flags), msg, ev_flags);
        switch_back(p, old_proc);
}

/* Attempts to send a posix signal to the process.  If they do not have an ev_q
 * registered for EV_POSIX_SIGNAL, then nothing will happen. */
void send_posix_signal(struct proc *p, int sig_nr)
{
        struct event_msg local_msg = {0};
        local_msg.ev_type = EV_POSIX_SIGNAL;
        local_msg.ev_arg1 = sig_nr;
        send_kernel_event(p, &local_msg, 0);
}
