* processes. */
#include <ucq.h>
+#include <ceq.h>
#include <bitmask.h>
#include <event.h>
#include <atomic.h>
atomic_or(&vcpd->flags, VC_CAN_RCV_MSG);
}
-/* Posts a message to the mbox, subject to flags. Feel free to send 0 for the
- * flags if you don't want to give them the option of EVENT_NOMSG (which is what
- * we do when sending an indirection event). Make sure that if mbox is a user
- * pointer, that you've checked it *and* have that processes address space
- * loaded. This can get called with a KVA for mbox. */
+static void send_evbitmap_msg(struct evbitmap *evbm, struct event_msg *msg)
+{
+	SET_BITMASK_BIT_ATOMIC(evbm->bitmap, msg->ev_type);	/* only ev_type survives; the rest of msg is dropped */
+	wmb();	/* bit must be globally visible before check_bits tells scanners to look */
+	evbm->check_bits = TRUE;
+}
+
+/* Posts a message to the mbox, dispatching on the mbox's type.  mbox points
+ * to user-accessible memory; if it came from userspace, it must already have
+ * been checked.  Either way, that process's address space must be loaded. */
static void post_ev_msg(struct proc *p, struct event_mbox *mbox,
                        struct event_msg *msg, int ev_flags)
{
	printd("[kernel] Sending event type %d to mbox %p\n", msg->ev_type, mbox);
	/* Sanity check */
	assert(p);
-	/* If they just want a bit (NOMSG), just set the bit */
-	if (ev_flags & EVENT_NOMSG) {
-		SET_BITMASK_BIT_ATOMIC(mbox->ev_bitmap, msg->ev_type);
-		wmb();
-		mbox->ev_check_bits = TRUE;
-	} else {
-		send_ucq_msg(&mbox->ev_msgs, p, msg);
+	switch (mbox->type) {
+		case (EV_MBOX_UCQ):
+			send_ucq_msg(&mbox->ucq, p, msg);	/* delivers the full event_msg */
+			break;
+		case (EV_MBOX_BITMAP):
+			send_evbitmap_msg(&mbox->evbm, msg);	/* records ev_type only */
+			break;
+		case (EV_MBOX_CEQ):
+			send_ceq_msg(&mbox->ceq, p, msg);
+			break;
+		default:
+			printk("[kernel] Unknown mbox type %d!\n", mbox->type);
	}
}
uint32_t vcoreid, int ev_flags)
{
struct vcore *vc;
+ if (!__proc_is_mcp(p)) {
+ spam_vcore(p, 0, ev_msg, ev_flags);
+ return;
+ }
if (ev_flags & EVENT_VCORE_MUST_RUN) {
/* Could check for waiting and skip these spams, which will fail. Could
* also skip trying for vcoreid, and just spam any old online VC. */
vc = TAILQ_FIRST(&p->inactive_vcs);
if (vc) { /* might be none in rare circumstances */
if (try_spam_vcore(p, vcore2vcoreid(p, vc), ev_msg, ev_flags)) {
- /* Need to ensure the proc wakes up, but only if it was WAITING.
- * One way for this to happen is if a normal vcore was preempted
- * right as another vcore was yielding, and the preempted
- * message was sent after the last vcore yielded (which caused
- * us to be WAITING */
- if (p->state == PROC_WAITING)
- proc_wakeup(p); /* internally, this double-checks WAITING */
+ /* It's possible that we're WAITING here. EVENT_WAKEUP will handle
+ * it. One way for this to happen is if a normal vcore was
+ * preempted right as another vcore was yielding, and the preempted
+ * message was sent after the last vcore yielded (which caused us to
+ * be WAITING). */
return;
}
}
* grabs the vmr_lock and pte_lock. */
spin_lock(&p->proc_lock);
if (p->state != PROC_WAITING) {
- /* We need to check the online and bulk_preempt lists again, now that we are
- * sure no one is messing with them. If we're WAITING, we can skip
+ /* We need to check the online and bulk_preempt lists again, now that we
+ * are sure no one is messing with them. If we're WAITING, we can skip
* these (or assert they are empty!). */
vc = TAILQ_FIRST(&p->online_vcs);
if (vc) {
* above */
set_vcore_msgable(vcore2vcoreid(p, vc));
/* The first event to catch the process with no online/bp vcores will need
- * to wake it up. (We could be RUNNABLE_M here if another event already woke
- * us.) and we didn't get lucky with the penultimate fallback.
- * proc_wakeup (and __proc_wakeup()) will check for WAITING. */
+ * to wake it up, which is handled elsewhere if they requested EVENT_WAKEUP.
+ * We could be RUNNABLE_M here if another event already woke us and we
+ * didn't get lucky with the penultimate fallback. */
spin_unlock(&p->proc_lock);
- proc_wakeup(p);
- return;
}
/* Helper: sends an indirection event for an ev_q, preferring vcoreid */
return;
}
/* At this point, we actually want to send and spam an INDIR.
- * This will guarantee the message makes it to some vcore. For flags, we
- * can't send NOMSG - that applied to the original ev_msg. */
- spam_public_msg(p, &local_msg, vcoreid, ev_q->ev_flags & ~EVENT_NOMSG);
+ * This will guarantee the message makes it to some vcore. */
+ spam_public_msg(p, &local_msg, vcoreid, ev_q->ev_flags);
}
/* Send an event to ev_q, based on the parameters in ev_q's flag. We don't
void send_event(struct proc *p, struct event_queue *ev_q, struct event_msg *msg,
uint32_t vcoreid)
{
- struct proc *old_proc;
+ uintptr_t old_proc;
struct event_mbox *ev_mbox = 0;
+
assert(!in_irq_ctx(&per_cpu_info[core_id()]));
assert(p);
- if (p->state == PROC_DYING)
+ if (proc_is_dying(p))
return;
printd("[kernel] sending msg to proc %p, ev_q %p\n", p, ev_q);
if (!ev_q) {
/* ev_q is a user pointer, so we need to make sure we're in the right
* address space */
old_proc = switch_to(p);
- /* If we're an _S, just spam vcore0, and wake up if necessary. */
- if (!__proc_is_mcp(p)) {
- spam_vcore(p, 0, msg, ev_q->ev_flags);
- wrmb(); /* don't let the notif_pending write pass the state read */
- /* using the same pattern as in spam_public (which can have multiple
- * unblock callbacks */
- if (p->state == PROC_WAITING)
- proc_wakeup(p);
- goto out;
- }
/* Get the vcoreid that we'll message (if appropriate). For INDIR and
* SPAMMING, this is the first choice of a vcore, but other vcores might get
* it. Common case is !APPRO and !ROUNDROBIN. Note we are clobbering the
* (via APPRO or whatever). */
if (ev_q->ev_flags & EVENT_SPAM_PUBLIC) {
spam_public_msg(p, msg, vcoreid, ev_q->ev_flags);
- goto out;
+ goto wakeup;
}
/* We aren't spamming and we know the default vcore, and now we need to
* figure out which mbox to use. If they provided an mbox, we'll use it.
printk("[kernel] Illegal addr for ev_mbox\n");
goto out;
}
- /* We used to support no msgs, but quit being lazy and send a 'msg'. If the
- * ev_q is a NOMSG, we won't actually memcpy or anything, it'll just be a
- * vehicle for sending the ev_type. */
- assert(msg);
post_ev_msg(p, ev_mbox, msg, ev_q->ev_flags);
wmb(); /* ensure ev_msg write is before alerting the vcore */
/* Prod/alert a vcore with an IPI or INDIR, if desired. INDIR will also
/* they may want an IPI despite not wanting an INDIR */
try_notify(p, vcoreid, ev_q->ev_flags);
}
+wakeup:
+ if ((ev_q->ev_flags & EVENT_WAKEUP) && (p->state == PROC_WAITING))
+ proc_wakeup(p);
/* Fall through */
out:
/* Return to the old address space. */
{
/* Need to set p as current to post the event */
struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
- struct proc *old_proc = switch_to(p);
+ uintptr_t old_proc = switch_to(p);
+
/* *ev_mbox is the user address of the vcpd mbox */
post_vc_msg(p, vcoreid, get_vcpd_mbox(vcoreid, ev_flags), msg, ev_flags);
switch_back(p, old_proc);