* processes. */
#include <ucq.h>
+#include <ceq.h>
#include <bitmask.h>
#include <event.h>
#include <atomic.h>
#include <stdio.h>
#include <assert.h>
#include <pmap.h>
+#include <schedule.h>
+
+/* Userspace could give us a vcoreid that causes us to compute a vcpd that is
+ * outside procdata. If we hit UWLIM, then we've gone farther than we should.
+ * We check the vcoreid, instead of the resulting address, to avoid issues like
+ * address wrap-around. */
+static bool vcoreid_is_safe(uint32_t vcoreid)
+{
+ /* MAX_NUM_VCORES == MAX_NUM_CORES (check procinfo/procdata) */
+ return vcoreid < MAX_NUM_CORES;
+}
/* Note these three helpers return the user address of the mbox, not the KVA.
* Load current to access this, and it will work for any process. */
static bool can_msg_vcore(uint32_t vcoreid)
{
struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
- return vcpd->can_rcv_msg;
+ return atomic_read(&vcpd->flags) & VC_CAN_RCV_MSG;
}
/* Says a vcore can be messaged. Only call this once you are sure this is true
 * (ideally when locked). */
static void set_vcore_msgable(uint32_t vcoreid)
{
struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
- vcpd->can_rcv_msg = TRUE;
+ atomic_or(&vcpd->flags, VC_CAN_RCV_MSG);
+}
+
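+/* Posts a bit-only event to an evbitmap mbox. Sets the bit for the ev_type,
+ * then raises check_bits; the wmb() ensures consumers that see check_bits set
+ * also see the new bit. */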
+static void send_evbitmap_msg(struct evbitmap *evbm, struct event_msg *msg)
+{
+ SET_BITMASK_BIT_ATOMIC(evbm->bitmap, msg->ev_type);
+ wmb();
+ evbm->check_bits = TRUE;
}
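+
+/* A minimal sketch of the consumer side (an illustration, not kernel code):
+ * userspace could drain an evbitmap roughly like below, assuming the
+ * GET/CLR_BITMASK_BIT helpers from bitmask.h and MAX_NR_EVENT. Clearing
+ * check_bits before scanning pairs with the wmb() above: a concurrent sender
+ * either publishes its bit before our scan or re-raises check_bits.
+ *
+ *	if (evbm->check_bits) {
+ *		evbm->check_bits = FALSE;
+ *		wrmb();	// clear check_bits before reading the bitmap
+ *		for (int i = 0; i < MAX_NR_EVENT; i++) {
+ *			if (GET_BITMASK_BIT(evbm->bitmap, i)) {
+ *				CLR_BITMASK_BIT_ATOMIC(evbm->bitmap, i);
+ *				// handle event type i (bit only, no payload)
+ *			}
+ *		}
+ *	}
+ */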
-/* Posts a message to the mbox, subject to flags. Feel free to send 0 for the
- * flags if you don't want to give them the option of EVENT_NOMSG (which is what
- * we do when sending an indirection event). Make sure that if mbox is a user
- * pointer, that you've checked it *and* have that processes address space
- * loaded. This can get called with a KVA for mbox. */
+/* Posts a message to the mbox. mbox is a pointer to user-accessible memory.
+ * If mbox is a user-provided pointer, make sure that you've checked it.
+ * Regardless, make sure you have that process's address space loaded. */
static void post_ev_msg(struct proc *p, struct event_mbox *mbox,
struct event_msg *msg, int ev_flags)
{
- printd("[kernel] Sending event type %d to mbox %08p\n", msg->ev_type, mbox);
+ printd("[kernel] Sending event type %d to mbox %p\n", msg->ev_type, mbox);
/* Sanity check */
assert(p);
- /* If they just want a bit (NOMSG), just set the bit */
- if (ev_flags & EVENT_NOMSG) {
- SET_BITMASK_BIT_ATOMIC(mbox->ev_bitmap, msg->ev_type);
- } else {
- send_ucq_msg(&mbox->ev_msgs, p, msg);
+ switch (mbox->type) {
+ case (EV_MBOX_UCQ):
+ send_ucq_msg(&mbox->ucq, p, msg);
+ break;
+ case (EV_MBOX_BITMAP):
+ send_evbitmap_msg(&mbox->evbm, msg);
+ break;
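+	/* CEQ: coalescing event queue, which merges repeat messages by event ID */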
+ case (EV_MBOX_CEQ):
+ send_ceq_msg(&mbox->ceq, p, msg);
+ break;
+ default:
+ printk("[kernel] Unknown mbox type %d!\n", mbox->type);
}
}
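+
+/* For reference, the mbox shape the switch above assumes (a sketch; event.h
+ * has the authoritative definition):
+ *
+ *	struct event_mbox {
+ *		int type;		// EV_MBOX_UCQ, EV_MBOX_BITMAP, or EV_MBOX_CEQ
+ *		union {
+ *			struct ucq ucq;
+ *			struct evbitmap evbm;
+ *			struct ceq ceq;
+ *		};
+ *	};
+ */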
* sure it is mapped (slight optimization) */
static void try_notify(struct proc *p, uint32_t vcoreid, int ev_flags)
{
+	/* Note this is an unlocked peek at the vcoremap */
if ((ev_flags & EVENT_IPI) && vcore_is_mapped(p, vcoreid))
proc_notify(p, vcoreid);
}
/* Helper: sends the message and an optional IPI to the vcore. Sends to the
- * public mbox. This is meant for spammy messages. */
+ * public mbox. */
static void spam_vcore(struct proc *p, uint32_t vcoreid,
struct event_msg *ev_msg, int ev_flags)
{
try_notify(p, vcoreid, ev_flags);
}
-/* Attempts to message a vcore that may or may not have 'can_rcv_msg' set. If
+/* Attempts to message a vcore that may or may not have VC_CAN_RCV_MSG set. If
* so, we'll post the message and the message will eventually get dealt with
 * (when the vcore runs or when it is preempt-recovered). */
static bool try_spam_vcore(struct proc *p, uint32_t vcoreid,
* something more customized for the lists. */
spam_vcore(p, vcoreid, ev_msg, ev_flags);
wrmb(); /* prev write (notif_pending) must come before following reads */
- /* if they are still alertable after we sent the msg, then they'll get
- * it before yielding (racing with userspace yield here). This check is
- * not as critical as the next one, but will allow us to alert vcores
- * that happen to concurrently be moved from the active to the
- * bulk_preempt list. */
- if (can_msg_vcore(vcoreid))
- return TRUE;
- /* As a backup, if they are still the first on the list, then they are
+ /* I used to check can_msg_vcore(vcoreid) here, but that would make
+ * spamming list members unusable for MUST_RUN scenarios.
+ *
+ * Regardless, if they are still the first on the list, then they are
* still going to get the message. For the online list, proc_yield()
* will return them to userspace (where they will get the message)
* because __alert_vcore() set notif_pending. For the BP list, they
* will either be turned on later, or have a preempt message sent about
* their demise.
*
- * We race on list membership (and not exclusively 'can_rcv_msg', so
+ * We race on list membership (and not exclusively VC_CAN_RCV_MSG), so
* that when it fails we can get a new vcore to try (or know WHP there
* are none). */
vc_first = TAILQ_FIRST(list);
/* This makes sure ev_msg is sent to some vcore, preferring vcoreid.
*
- * One of the goals of FALLBACK (and this func) is to allow processes to yield
+ * One of the goals of SPAM_INDIR (and this func) is to allow processes to yield
* cores without fear of losing messages. Even when yielding and getting
* preempted, if your message is spammed, it will get to some vcore. If
* MUST_RUN is set, it'll get to a running vcore. Messages that you send like
* this must be able to handle spurious reads, since more than one vcore is
* likely to get the message and handle it.
*
- * We try the desired vcore, using 'can_rcv_msg'. Failing that, we'll search
+ * We try the desired vcore, using VC_CAN_RCV_MSG. Failing that, we'll search
* the online and then the bulk_preempted lists. These lists serve as a way to
* find likely messageable vcores. spam_list_member() helps us with them,
* failing if anything seems to go wrong. At which point we just lock and try
uint32_t vcoreid, int ev_flags)
{
struct vcore *vc;
- /* First, try posting to the desired vcore (so long as we don't have to send
- * it to a vcore that will run, like we do for preempt messages). */
- if (!(ev_flags & EVENT_VCORE_MUST_RUN) &&
- (try_spam_vcore(p, vcoreid, ev_msg, ev_flags)))
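+	/* For non-MCPs (_S procs), everything runs through vcore 0 */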
+ if (!__proc_is_mcp(p)) {
+ spam_vcore(p, 0, ev_msg, ev_flags);
+ return;
+ }
+ if (ev_flags & EVENT_VCORE_MUST_RUN) {
+ /* Could check for WAITING and skip these spams, which would fail. Could
+ * also skip trying for vcoreid, and just spam any old online VC. */
+ if (vcore_is_mapped(p, vcoreid)) { /* check, signal, check again */
+ spam_vcore(p, vcoreid, ev_msg, ev_flags);
+ wrmb(); /* notif_pending write must come before following read */
+ if (vcore_is_mapped(p, vcoreid))
+ return;
+ }
+ if (spam_list_member(&p->online_vcs, p, ev_msg, ev_flags))
+ return;
+ goto ultimate_fallback;
+ }
+ /* First, try posting to the desired vcore */
+ if (try_spam_vcore(p, vcoreid, ev_msg, ev_flags))
return;
/* If the process is WAITING, let's just jump to the fallback */
if (p->state == PROC_WAITING)
vc = TAILQ_FIRST(&p->inactive_vcs);
if (vc) { /* might be none in rare circumstances */
if (try_spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags)) {
- /* Need to ensure the proc wakes up, but only if it was WAITING.
- * One way for this to happen is if a normal vcore was preempted
- * right as another vcore was yielding, and the preempted
- * message was sent after the last vcore yielded (which caused
- * us to be WAITING */
- if (p->state == PROC_WAITING) {
- spin_lock(&p->proc_lock);
- __proc_wakeup(p); /* internally, this double-checks WAITING */
- spin_unlock(&p->proc_lock);
- }
+ /* It's possible that we're WAITING here. EVENT_WAKEUP will handle
+ * it. One way for this to happen is if a normal vcore was
+ * preempted right as another vcore was yielding, and the preempted
+ * message was sent after the last vcore yielded (which caused us to
+ * be WAITING). */
return;
}
}
* empty and the process is simply WAITING (yielded all of its vcores and is
* waiting on an event). Time for the ultimate fallback: locking. Note
* that when we __alert_vcore(), there is a chance we need to mmap, which
- * grabs the mm_lock. */
+ * grabs the vmr_lock and pte_lock. */
spin_lock(&p->proc_lock);
if (p->state != PROC_WAITING) {
- /* We need to check the online and bulk_preempt lists again, now that we are
- * sure no one is messing with them. If we're WAITING, we can skip
+ /* We need to check the online and bulk_preempt lists again, now that we
+ * are sure no one is messing with them. If we're WAITING, we can skip
* these (or assert they are empty!). */
vc = TAILQ_FIRST(&p->online_vcs);
if (vc) {
* above */
set_vcore_msgable(vcore2vcoreid(p, vc));
/* The first event to catch the process with no online/bp vcores will need
- * to wake it up. (We could be RUNNABLE_M here if another event already woke
- * us.) and we didn't get lucky with the penultimate fallback.
- * __proc_wakeup() will check for WAITING. */
- __proc_wakeup(p);
+ * to wake it up, which is handled elsewhere if they requested EVENT_WAKEUP.
+ * We could be RUNNABLE_M here if another event already woke us and we
+ * didn't get lucky with the penultimate fallback. */
spin_unlock(&p->proc_lock);
- return;
}
/* Helper: sends an indirection event for an ev_q, preferring vcoreid */
wmb(); /* force this write to happen before any event writes */
local_msg.ev_type = EV_EVENT;
local_msg.ev_arg3 = ev_q;
- /* Don't care about FALLBACK, just send and be done with it. TODO:
- * considering getting rid of FALLBACK as an option and making it mandatory
- * when you want an INDIR. Having trouble thinking of when you'd want an
- * INDIR but not a FALLBACK. */
- if (!(ev_q->ev_flags & EVENT_FALLBACK)) {
- printk("[kernel] INDIR requested without FALLBACK, prob a bug.\n");
+ /* If we're not spamming indirs, just send and be done with it.
+ *
+ * It's possible that the user does not want to poll their evq and wants an
+ * INDIR, but also doesn't care about sleeping or otherwise not getting the
+ * message right away. The INDIR could sit in the VCPD of a vcore that
+ * doesn't run for a while. Perhaps the app could always make sure VC 0 is on
+ * when it is running at all, and send the INDIR there. Or there could be a
+ * per-vc evq that only needs to be handled when the VC turns on. This
+ * gets at another aspect of INDIRs, other than their need for "only once"
+ * operation: maybe the mbox type isn't a UCQ (like the VCPD mboxes). */
+ if (!(ev_q->ev_flags & EVENT_SPAM_INDIR)) {
spam_vcore(p, vcoreid, &local_msg, ev_q->ev_flags);
return;
}
- /* At this point, we actually want to send an INDIR (with FALLBACK).
- * This will guarantee the message makes it to some vcore. For flags, we
- * only want to send flags relevant to spamming messages. */
- spam_public_msg(p, &local_msg, vcoreid, ev_q->ev_flags & EVENT_SPAM_FLAGS);
-}
-
-/* Helper that alerts a vcore, by IPI and/or INDIR, that it needs to check the
- * ev_q. send_indir() eventually Handles FALLBACK and other tricky things.
- * alerted. */
-static void alert_vcore(struct proc *p, struct event_queue *ev_q,
- uint32_t vcoreid)
-{
- /* INDIR will also call try_notify (IPI) later */
- if (ev_q->ev_flags & EVENT_INDIR) {
- send_indir(p, ev_q, vcoreid);
- } else {
- /* they may want an IPI despite not wanting an INDIR */
- try_notify(p, vcoreid, ev_q->ev_flags);
- }
+ /* At this point, we actually want to send and spam an INDIR.
+ * This will guarantee the message makes it to some vcore. */
+ spam_public_msg(p, &local_msg, vcoreid, ev_q->ev_flags);
}
/* Send an event to ev_q, based on the parameters in ev_q's flags. We don't
void send_event(struct proc *p, struct event_queue *ev_q, struct event_msg *msg,
uint32_t vcoreid)
{
- struct proc *old_proc;
+ uintptr_t old_proc;
struct event_mbox *ev_mbox = 0;
+
+ assert(!in_irq_ctx(&per_cpu_info[core_id()]));
assert(p);
- printd("[kernel] sending msg to proc %08p, ev_q %08p\n", p, ev_q);
+ if (proc_is_dying(p))
+ return;
+ printd("[kernel] sending msg to proc %p, ev_q %p\n", p, ev_q);
if (!ev_q) {
warn("[kernel] Null ev_q - kernel code should check before sending!");
return;
}
if (!is_user_rwaddr(ev_q, sizeof(struct event_queue))) {
/* Ought to kill them, just warn for now */
- warn("[kernel] Illegal addr for ev_q");
+ printk("[kernel] Illegal addr for ev_q\n");
+ return;
+ }
+ /* This should be caught by "future technology" that can tell when the
+ * kernel PFs on the user's behalf. For now, we catch common userspace bugs
+ * (had this happen a few times). */
+ if (!PTE_ADDR(ev_q)) {
+ printk("[kernel] Bad addr %p for ev_q\n", ev_q);
return;
}
/* ev_q is a user pointer, so we need to make sure we're in the right
* address space */
old_proc = switch_to(p);
- /* Get the mbox and vcoreid */
- /* If we're going with APPRO, we use the kernel's suggested vcore's ev_mbox.
- * vcoreid is already what the kernel suggests. */
- if (ev_q->ev_flags & EVENT_VCORE_APPRO) {
- /* flags determine if it's private (like a preempt pending) or not */
- ev_mbox = get_vcpd_mbox(vcoreid, ev_q->ev_flags);
- } else { /* common case */
- ev_mbox = ev_q->ev_mbox;
- vcoreid = ev_q->ev_vcore;
- }
- /* Check on the style, which could affect our mbox selection. Other styles
- * would go here (or in similar functions we call to). Important thing is
- * we come out knowing which vcore to send to in the event of an IPI/INDIR,
- * and we know what mbox to post to. */
+ /* Get the vcoreid that we'll message (if appropriate). For INDIR and
+ * SPAMMING, this is the first choice of a vcore, but other vcores might get
+ * it. Common case is !APPRO and !ROUNDROBIN. Note we are clobbering the
+ * vcoreid parameter. */
+ if (!(ev_q->ev_flags & EVENT_VCORE_APPRO))
+ vcoreid = ev_q->ev_vcore; /* use the ev_q's vcoreid */
+ /* Note that RR overwrites APPRO */
if (ev_q->ev_flags & EVENT_ROUNDROBIN) {
- /* Pick a vcore, and if we don't have a mbox yet, pick that vcore's
- * default mbox. Assuming ev_vcore was the previous one used. Note
- * that round-robin overrides the passed-in vcoreid. */
+ /* Pick a vcore, round-robin style. Assuming ev_vcore was the previous
+ * one used. Note that round-robin overrides the passed-in vcoreid.
+ * Also note this may be 'wrong' if num_vcores changes. */
vcoreid = (ev_q->ev_vcore + 1) % p->procinfo->num_vcores;
ev_q->ev_vcore = vcoreid;
- /* Note that the style of not having a specific ev_mbox may go away. I
- * can't think of legitimate uses of this for now, since things that are
- * RR probably are non-vcore-business, and thus inappropriate for a VCPD
- * ev_mbox. */
- if (!ev_mbox)
- ev_mbox = get_vcpd_mbox(vcoreid, ev_q->ev_flags);
}
+ if (!vcoreid_is_safe(vcoreid)) {
+ /* Ought to kill them, just warn for now */
+ printk("[kernel] Vcoreid %d unsafe! (too big?)\n", vcoreid);
+ goto out;
+ }
+ /* If we're a SPAM_PUBLIC, they just want us to spam the message. Note we
+ * don't care about the mbox, since it'll go to VCPD public mboxes, and
+ * we'll prefer to send it to whatever vcoreid we determined at this point
+ * (via APPRO or whatever). */
+ if (ev_q->ev_flags & EVENT_SPAM_PUBLIC) {
+ spam_public_msg(p, msg, vcoreid, ev_q->ev_flags);
+ goto wakeup;
+ }
+ /* We aren't spamming and we know the default vcore, and now we need to
+ * figure out which mbox to use. If they provided an mbox, we'll use it.
+ * If not, we'll use a VCPD mbox (public or private, depending on the
+ * flags). */
+ ev_mbox = ev_q->ev_mbox;
+ if (!ev_mbox)
+ ev_mbox = get_vcpd_mbox(vcoreid, ev_q->ev_flags);
/* At this point, we ought to have the right mbox to send the msg to, and
- * which vcore to send an IPI to (if we send one). The mbox could be the
- * vcore's vcpd ev_mbox. The vcoreid only matters for IPIs and INDIRs. */
+ * which vcore to alert (IPI/INDIR) (if applicable). The mbox could be the
+ * vcore's vcpd ev_mbox. */
if (!ev_mbox) {
- /* this is a process error */
+ /* This shouldn't happen anymore; it's here for sanity's sake */
warn("[kernel] ought to have an mbox by now!");
goto out;
}
/* Even if we're using an mbox in procdata (VCPD), we want a user pointer */
if (!is_user_rwaddr(ev_mbox, sizeof(struct event_mbox))) {
/* Ought to kill them, just warn for now */
- warn("[kernel] Illegal addr for ev_mbox");
+ printk("[kernel] Illegal addr for ev_mbox\n");
goto out;
}
- /* We used to support no msgs, but quit being lazy and send a 'msg'. If the
- * ev_q is a NOMSG, we won't actually memcpy or anything, it'll just be a
- * vehicle for sending the ev_type. */
- assert(msg);
post_ev_msg(p, ev_mbox, msg, ev_q->ev_flags);
- wmb(); /* ensure ev_msg write is before alert_vcore() */
- /* Help out userspace a bit by checking for a potentially confusing bug */
- if ((ev_mbox == get_vcpd_mbox_pub(vcoreid)) &&
- (ev_q->ev_flags & EVENT_INDIR))
- printk("[kernel] User-bug: ev_q has an INDIR with a VCPD ev_mbox!\n");
- /* Prod/alert a vcore with an IPI or INDIR, if desired */
- if ((ev_q->ev_flags & (EVENT_IPI | EVENT_INDIR)))
- alert_vcore(p, ev_q, vcoreid);
+ wmb(); /* ensure ev_msg write is before alerting the vcore */
+ /* Prod/alert a vcore with an IPI or INDIR, if desired. INDIR will also
+ * call try_notify (IPI) later */
+ if (ev_q->ev_flags & EVENT_INDIR) {
+ send_indir(p, ev_q, vcoreid);
+ } else {
+ /* they may want an IPI despite not wanting an INDIR */
+ try_notify(p, vcoreid, ev_q->ev_flags);
+ }
+wakeup:
+ if ((ev_q->ev_flags & EVENT_WAKEUP) && (p->state == PROC_WAITING))
+ proc_wakeup(p);
/* Fall through */
out:
/* Return to the old address space. */
{
/* Need to set p as current to post the event */
struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
- struct proc *old_proc = switch_to(p);
+ uintptr_t old_proc = switch_to(p);
+
/* *ev_mbox is the user address of the vcpd mbox */
post_vc_msg(p, vcoreid, get_vcpd_mbox(vcoreid, ev_flags), msg, ev_flags);
switch_back(p, old_proc);
}
+
+/* Attempts to send a posix signal to the process. If they do not have an ev_q
+ * registered for EV_POSIX_SIGNAL, then nothing will happen. */
+void send_posix_signal(struct proc *p, int sig_nr)
+{
+ struct event_msg local_msg = {0};
+ local_msg.ev_type = EV_POSIX_SIGNAL;
+ local_msg.ev_arg1 = sig_nr;
+ send_kernel_event(p, &local_msg, 0);
+}
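+
+/* A minimal sketch of the userspace half (an illustration, not part of this
+ * file): with parlib, an app could register a handler and turn the event on,
+ * where handle_posix_signal is a hypothetical handle_event_t callback:
+ *
+ *	register_ev_handler(EV_POSIX_SIGNAL, handle_posix_signal, 0);
+ *	enable_kevent(EV_POSIX_SIGNAL, 0, EVENT_SPAM_PUBLIC | EVENT_WAKEUP);
+ */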