/* Helper, will make sure the vcore owning the qnode is running. If we change to
* that vcore, we'll continue when our vcore gets restarted. If the change
- * fails, it is because the vcore is running, and we'll continue. */
+ * fails, it is because the vcore is running, and we'll continue.
+ *
+ * It's worth noting that changing to another vcore won't hurt correctness.
+ * Even if that vcore is no longer the lockholder, it will be checking
+ * preemption messages and will help break out of the deadlock. So long as
+ * we don't wastefully spin, we're okay. */
void __ensure_qnode_runs(struct mcs_pdr_qnode *qnode)
{
if (!vcore_is_mapped(qnode->vcoreid)) {
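		/* A minimal sketch of the recovery step (the remainder of this
		 * helper is not shown in this excerpt), assuming Akaros's
		 * sys_change_vcore(vcoreid, enable_my_notif) syscall: donate our
		 * core to the qnode's owner so it can make progress.  If that
		 * vcore turns out to be running already, the change simply fails
		 * and we fall through, per the comment above. */
		sys_change_vcore(qnode->vcoreid, FALSE);
	}
}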
ucq->prod_overflow = FALSE;
atomic_set(&ucq->nr_extra_pgs, 0);
atomic_set(&ucq->spare_pg, pg2);
- static_assert(sizeof(struct mcs_lock) <= sizeof(ucq->u_lock));
- mcs_lock_init((struct mcs_lock*)(&ucq->u_lock));
+ static_assert(sizeof(struct mcs_pdr_lock) <= sizeof(ucq->u_lock));
+ mcs_pdr_init((struct mcs_pdr_lock*)(&ucq->u_lock));
ucq->ucq_ready = TRUE;
}
assert(pg1 && pg2);
munmap((void*)pg1, PGSIZE);
munmap((void*)pg2, PGSIZE);
+ mcs_pdr_fini((struct mcs_pdr_lock*)&ucq->u_lock);
}
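/* The init/free pair above embeds the PDR lock in the ucq's fixed-size u_lock
 * storage.  A self-contained sketch of that idiom, with a hypothetical struct
 * and field names (only static_assert, mcs_pdr_init, and mcs_pdr_fini come
 * from the code above):
 *
 *	struct foo {
 *		uint8_t lock_space[64];	// opaque storage reserved for the lock
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		// Compile-time check that the real lock fits in the reserved
 *		// space, using the same single-argument static_assert as above.
 *		static_assert(sizeof(struct mcs_pdr_lock) <= sizeof(f->lock_space));
 *		mcs_pdr_init((struct mcs_pdr_lock*)f->lock_space);
 *	}
 *
 *	void foo_fini(struct foo *f)
 *	{
 *		// Pair every init with a fini, in case the lock allocates state.
 *		mcs_pdr_fini((struct mcs_pdr_lock*)f->lock_space);
 *	}
 */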
/* Consumer side, returns 0 on success and fills *msg with the ev_msg. If the
struct msg_container *my_msg;
/* Locking stuff. Would be better with a spinlock, if we had them, since
* this should be lightly contested. */
- struct mcs_lock_qnode local_qn = {0};
- struct mcs_lock *ucq_lock = (struct mcs_lock*)(&ucq->u_lock);
+ struct mcs_pdr_lock *ucq_lock = (struct mcs_pdr_lock*)(&ucq->u_lock);
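	/* Note the on-stack mcs_lock_qnode is gone: unlike mcs_lock_notifsafe()
	 * and mcs_unlock_notifsafe(), the PDR lock calls take only the lock
	 * pointer, presumably tracking qnodes per vcore inside the lock itself. */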
do {
loop_top:
if (slot_is_good(my_idx))
goto claim_slot;
/* Slot is bad, let's try and fix it */
- mcs_lock_notifsafe(ucq_lock, &local_qn);
+ mcs_pdr_lock(ucq_lock);
/* Reread the idx, in case someone else fixed things up while we
* were waiting/fighting for the lock */
my_idx = atomic_read(&ucq->cons_idx);
if (slot_is_good(my_idx)) {
/* Someone else fixed it already, let's just try to get out */
- mcs_unlock_notifsafe(ucq_lock, &local_qn);
+ mcs_pdr_unlock(ucq_lock);
/* Make sure this new slot has a producer (ucq isn't empty) */
if (my_idx == atomic_read(&ucq->prod_idx))
return -1;
}
/* All fixed up, unlock. Other consumers may lock and check to make
* sure things are done. */
- mcs_unlock_notifsafe(ucq_lock, &local_qn);
+ mcs_pdr_unlock(ucq_lock);
/* Now that everything is fixed, try again from the top */
goto loop_top;
claim_slot:
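/* The bad-slot path above is a double-checked fix-up: check without the lock,
 * then re-check under the lock and repair only if no one beat us to it.  A
 * generic sketch of that shape (shared_idx, fixup_lock, and fix_up_slot are
 * illustrative names, not the ucq code):
 *
 *	static atomic_t shared_idx;
 *	static struct mcs_pdr_lock fixup_lock;
 *
 *	for (;;) {
 *		long idx = atomic_read(&shared_idx);
 *		if (slot_is_good(idx))
 *			break;				// fast path: nothing to fix
 *		mcs_pdr_lock(&fixup_lock);
 *		idx = atomic_read(&shared_idx);		// re-read under the lock
 *		if (!slot_is_good(idx))
 *			fix_up_slot(&shared_idx);	// we won the race; repair it
 *		mcs_pdr_unlock(&fixup_lock);
 *		// retry from the top either way
 *	}
 */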