void init_timer_chain(struct timer_chain *tchain,
void (*set_interrupt) (uint64_t, struct timer_chain *))
{
+ spinlock_init_irqsave(&tchain->lock);
TAILQ_INIT(&tchain->waiters);
tchain->set_interrupt = set_interrupt;
reset_tchain_times(tchain);
waiter->wake_up_time = ALARM_POISON_TIME;
waiter->func = func;
if (!func)
- init_sem(&waiter->sem, 0);
+ sem_init_irqsave(&waiter->sem, 0);
+ waiter->has_fired = FALSE; /* so we can check this before arming */
}
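+
+/* Usage sketch (illustrative, not part of this patch): a handler-style
+ * waiter. The initializer above and the relative-time setter are assumed to
+ * be init_awaiter() and set_awaiter_rel(); example_handler is made up.
+ * Passing 0 instead of a handler gives a kthread-style waiter. */
+static void example_handler(struct alarm_waiter *waiter)
+{
+ /* runs from IRQ context, with the tchain lock held */
+ printk("alarm %p fired\n", waiter);
+}
+
+static void example_init_waiter(struct alarm_waiter *waiter)
+{
+ init_awaiter(waiter, example_handler);
+ set_awaiter_rel(waiter, 10000); /* target time is ~10000 usec from now */
+}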
/* Give this the absolute time. For now, abs_time is the TSC time that you want
then = now + usec2tsc(usleep);
/* This will go off if we wrap-around the TSC. It'll never happen for legit
* values, but this might catch some bugs with large usleeps. */
- assert(now < then);
+ assert(now <= then);
set_awaiter_abs(waiter, then);
}
* will wake up. o/w, it will call the func ptr stored in the awaiter. */
static void wake_awaiter(struct alarm_waiter *waiter)
{
- if (waiter->func) {
+ waiter->has_fired = TRUE;
+ cmb(); /* enforce the has_fired write before the handlers */
+ if (waiter->func)
waiter->func(waiter);
- } else {
- /* Might encaps this */
- struct kthread *sleeper;
- sleeper = __up_sem(&waiter->sem, TRUE);
- if (sleeper)
- kthread_runnable(sleeper);
- /* Don't touch the sleeper or waiter after making the kthread runnable,
- * since it could be in use on another core (and the waiter can be
- * clobbered as the kthread unwinds its stack). */
- }
+ else
+ sem_up(&waiter->sem); /* IRQs are disabled, can call sem_up directly */
}
/* This is called when an interrupt triggers a tchain, and needs to wake up
- * everyone whose time is up. */
+ * everyone whose time is up. Called from IRQ context. */
void trigger_tchain(struct timer_chain *tchain)
{
struct alarm_waiter *i, *temp;
uint64_t now = read_tsc();
bool changed_list = FALSE;
assert(!irq_is_enabled());
+ spin_lock(&tchain->lock);
TAILQ_FOREACH_SAFE(i, &tchain->waiters, next, temp) {
- printd("Trying to wake up %08p who is due at %llu and now is %llu\n",
+ printd("Trying to wake up %p who is due at %llu and now is %llu\n",
i, i->wake_up_time, now);
/* TODO: Could also do something in cases where we're close to now */
if (i->wake_up_time <= now) {
}
/* Need to reset the interrupt no matter what */
reset_tchain_interrupt(tchain);
+ spin_unlock(&tchain->lock);
}
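+
+/* Sketch (not part of this patch) of the expected caller: the per-core timer
+ * IRQ handler hands its own tchain to trigger_tchain(). The handler name and
+ * signature are hypothetical; hook it into the IRQ layer as appropriate. */
+static void example_timer_irq_handler(void)
+{
+ /* IRQs are off in IRQ context, which trigger_tchain() asserts */
+ trigger_tchain(&per_cpu_info[core_id()].tchain);
+}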
-/* Sets the alarm. If it is a kthread-style alarm (func == 0), sleep on it
- * later. Hold the lock, if applicable. If this is a per-core tchain, the
- * interrupt-disabling ought to suffice. */
-void set_alarm(struct timer_chain *tchain, struct alarm_waiter *waiter)
+/* Helper, inserts the waiter into the tchain, returning TRUE if we still need
+ * to reset the tchain interrupt. Caller holds the lock. */
+static bool __insert_awaiter(struct timer_chain *tchain,
+ struct alarm_waiter *waiter)
{
struct alarm_waiter *i, *temp;
- int8_t irq_state = 0;
-
/* This will fail if you don't set a time */
assert(waiter->wake_up_time != ALARM_POISON_TIME);
- disable_irqsave(&irq_state);
+ /* has_fired also tracks whether the waiter is off the tchain; clear it now
+ * that we're arming it */
+ waiter->has_fired = FALSE;
/* Either the list is empty, or not. */
if (TAILQ_EMPTY(&tchain->waiters)) {
tchain->earliest_time = waiter->wake_up_time;
tchain->latest_time = waiter->wake_up_time;
TAILQ_INSERT_HEAD(&tchain->waiters, waiter, next);
/* Need to turn on the timer interrupt later */
- goto reset_out;
+ return TRUE;
}
/* If not, either we're first, last, or in the middle. Reset the interrupt
* and adjust the tchain's times accordingly. */
tchain->earliest_time = waiter->wake_up_time;
TAILQ_INSERT_HEAD(&tchain->waiters, waiter, next);
/* Changed the first entry; we'll need to reset the interrupt later */
- goto reset_out;
+ return TRUE;
}
/* If there is a tie for last, the newer one will really go last. We need
* to handle equality here since the loop later won't catch it. */
tchain->latest_time = waiter->wake_up_time;
/* Proactively put it at the end if we know we're last */
TAILQ_INSERT_TAIL(&tchain->waiters, waiter, next);
- goto no_reset_out;
+ return FALSE;
}
/* Insert before the first one you are earlier than. This won't scale well
* (TODO) if we have a lot of inserts. The proactive insert_tail up above
TAILQ_FOREACH_SAFE(i, &tchain->waiters, next, temp) {
if (waiter->wake_up_time < i->wake_up_time) {
TAILQ_INSERT_BEFORE(i, waiter, next);
- goto no_reset_out;
+ return FALSE;
}
}
- panic("Could not find a spot for awaiter %08p\n", waiter);
-reset_out:
- reset_tchain_interrupt(tchain);
-no_reset_out:
- enable_irqsave(&irq_state);
- /* TODO: could put some debug stuff here */
+ panic("Could not find a spot for awaiter %p\n", waiter);
+}
+
+/* Sets the alarm. If it is a kthread-style alarm (func == 0), sleep on it
+ * later. This version assumes you have the lock held. That only makes sense
+ * from alarm handlers, which are called from IRQ context with this lock held. */
+void __set_alarm(struct timer_chain *tchain, struct alarm_waiter *waiter)
+{
+ if (__insert_awaiter(tchain, waiter))
+ reset_tchain_interrupt(tchain);
+}
+
+/* Sets the alarm. Don't call this from an alarm handler, since you already
+ * have the lock held. Call __set_alarm() instead. */
+void set_alarm(struct timer_chain *tchain, struct alarm_waiter *waiter)
+{
+ spin_lock_irqsave(&tchain->lock);
+ __set_alarm(tchain, waiter);
+ spin_unlock_irqsave(&tchain->lock);
}
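+
+/* Sketch (illustrative only): a periodic alarm that re-arms itself from its
+ * own handler. Handlers run with the tchain lock held, so they must use
+ * __set_alarm(), not set_alarm(). The example_tchain global and the 1000 usec
+ * period are made up; set_awaiter_rel() is assumed from the alarm API. */
+static struct timer_chain *example_tchain;
+
+static void example_periodic_handler(struct alarm_waiter *waiter)
+{
+ set_awaiter_rel(waiter, 1000); /* next tick, ~1000 usec out */
+ __set_alarm(example_tchain, waiter); /* lock is already held here */
+}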
-/* Removes waiter from the tchain before it goes off.
- * TODO: handle waiters that already went off. */
-void unset_alarm(struct timer_chain *tchain, struct alarm_waiter *waiter)
+/* Helper, rips the waiter from the tchain, knowing that it is on the list.
+ * Returns TRUE if the tchain interrupt needs to be reset. Callers hold the
+ * lock. */
+static bool __remove_awaiter(struct timer_chain *tchain,
+ struct alarm_waiter *waiter)
{
struct alarm_waiter *temp;
bool reset_int = FALSE; /* whether or not to reset the interrupt */
- int8_t irq_state = 0;
-
- disable_irqsave(&irq_state);
- warn("Code currently assumes the alarm waiter hasn't triggered yet!");
/* Need to make sure earliest and latest are set, in case we're mucking with
* the first and/or last element of the chain. */
if (TAILQ_FIRST(&tchain->waiters) == waiter) {
tchain->latest_time = (temp) ? temp->wake_up_time : ALARM_POISON_TIME;
}
TAILQ_REMOVE(&tchain->waiters, waiter, next);
- if (reset_int)
+ return reset_int;
+}
+
+/* Removes waiter from the tchain before it goes off. Returns TRUE if we
+ * disarmed before the alarm went off, FALSE if it already fired. */
+bool unset_alarm(struct timer_chain *tchain, struct alarm_waiter *waiter)
+{
+ spin_lock_irqsave(&tchain->lock);
+ if (waiter->has_fired) {
+ /* The alarm has already gone off; it is not even on this tchain's list.
+ * The concurrent setting of has_fired to TRUE happens under the tchain's
+ * lock, so this check does not race with the wakeup. The code that sets
+ * has_fired to FALSE only runs while the waiter is on no chain, so there
+ * is no race there either. */
+ spin_unlock_irqsave(&tchain->lock);
+ return FALSE;
+ }
+ if (__remove_awaiter(tchain, waiter))
+ reset_tchain_interrupt(tchain);
+ spin_unlock_irqsave(&tchain->lock);
+ return TRUE;
+}
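+
+/* Sketch (illustrative only): canceling an alarm and handling a lost race.
+ * A FALSE return means the alarm already fired, so the handler's side effects
+ * (or the kthread wakeup) have already happened. */
+static void example_cancel(struct timer_chain *tchain,
+ struct alarm_waiter *waiter)
+{
+ if (!unset_alarm(tchain, waiter))
+ printk("alarm %p already fired; could not disarm it\n", waiter);
+}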
+
+/* waiter may be on the tchain, or it might have fired already and be off the
+ * tchain. Either way, this will put the waiter on the list, set to go off at
+ * abs_time. If you know the alarm has fired, don't call this. Just set the
+ * awaiter, and then call set_alarm(). */
+void reset_alarm_abs(struct timer_chain *tchain, struct alarm_waiter *waiter,
+ uint64_t abs_time)
+{
+ bool reset_int = FALSE; /* whether or not to reset the interrupt */
+ spin_lock_irqsave(&tchain->lock);
+ /* We only need to remove/unset when the alarm has not fired yet. If it
+ * has, it's like a fresh insert */
+ if (!waiter->has_fired)
+ reset_int = __remove_awaiter(tchain, waiter);
+ set_awaiter_abs(waiter, abs_time);
+ /* Regardless, the waiter needs to be reinserted; __insert_awaiter() will
+ * clear has_fired */
+ if (__insert_awaiter(tchain, waiter) || reset_int)
reset_tchain_interrupt(tchain);
- enable_irqsave(&irq_state);
+ spin_unlock_irqsave(&tchain->lock);
}
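+
+/* Sketch (illustrative only): pushing an alarm back by about one second,
+ * whether or not it has fired yet. If the caller knows the alarm already
+ * fired, setting the awaiter and calling set_alarm() is cheaper, per the
+ * comment above reset_alarm_abs(). */
+static void example_push_back(struct timer_chain *tchain,
+ struct alarm_waiter *waiter)
+{
+ reset_alarm_abs(tchain, waiter, read_tsc() + usec2tsc(1000000));
+}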
/* Attempts to sleep on the alarm. Could fail if you aren't allowed to kthread
* (process limit, etc). Don't call it on a waiter that is an event-handler. */
int sleep_on_awaiter(struct alarm_waiter *waiter)
{
+ int8_t irq_state = 0;
if (waiter->func)
- panic("Tried blocking on a waiter %08p with a func %08p!", waiter,
+ panic("Tried blocking on a waiter %p with a func %p!", waiter,
waiter->func);
/* Put the kthread to sleep. TODO: This can fail (or at least it will be
* able to in the future) and we'll need to handle that. */
- sleep_on(&waiter->sem);
+ sem_down_irqsave(&waiter->sem, &irq_state);
return 0;
}
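+
+/* Sketch (illustrative only): a blocking sleep built from the pieces above.
+ * The waiter has func == 0, so wake_awaiter() ups the sem and
+ * sleep_on_awaiter() blocks on it. init_awaiter() and set_awaiter_rel() are
+ * assumed names from the rest of the alarm API. */
+static void example_usleep(struct timer_chain *tchain, uint64_t usec)
+{
+ struct alarm_waiter waiter;
+
+ init_awaiter(&waiter, 0); /* kthread-style: no handler func */
+ set_awaiter_rel(&waiter, usec);
+ set_alarm(tchain, &waiter);
+ sleep_on_awaiter(&waiter); /* returns after the alarm fires */
+}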
* - Make sure the interrupt source can find tchain
* - Make sure the interrupt handler calls trigger_tchain(tchain)
* - Make sure you don't clobber an old tchain here (a bug)
- * This implies the function knows how to find its timer source/void */
+ * This implies the function knows how to find its timer source/void
+ *
+ * Called with the tchain lock held, and IRQs disabled. However, we could be
+ * calling this cross-core, and we cannot disable the remote core's IRQs
+ * (hence the locking). */
void set_pcpu_alarm_interrupt(uint64_t time, struct timer_chain *tchain)
{
uint64_t rel_usec, now;
- struct timer_chain *pcpui_tchain = &per_cpu_info[core_id()].tchain;
+ int pcoreid = core_id();
+ struct timer_chain *pcpui_tchain = &per_cpu_info[pcoreid].tchain;
+
+ if (pcpui_tchain != tchain) {
+ /* Cross-core call: we can simply send an alarm IRQ. The remote core's
+ * alarm handler will reset its pcpu timer based on its current lists.
+ * That core takes an extra IRQ, but it gets the job done. */
+ /* TODO: using the LAPIC vector is a bit ghetto, since that's x86. But
+ * RISCV ignores the vector field, and we don't have a global IRQ vector
+ * namespace or anything. */
+ /* The remote tchain is embedded in that core's per_cpu_info, so we can
+ * recover the target core id from the tchain pointer. */
+ send_ipi((struct per_cpu_info*)((uintptr_t)tchain
+ - offsetof(struct per_cpu_info, tchain)) - per_cpu_info,
+ LAPIC_TIMER_DEFAULT_VECTOR);
+ return;
+ }
if (time) {
/* Arm the alarm. For times in the past, we just need to make sure it
* goes off. */
rel_usec = tsc2usec(time - now);
rel_usec = MAX(rel_usec, 1);
printd("Setting alarm for %llu, it is now %llu, rel_time %llu "
- "tchain %08p\n", time, now, rel_usec, pcpui_tchain);
- /* Note that sparc doesn't honor the one-shot setting, so you might get
- * spurious interrupts. */
+ "tchain %p\n", time, now, rel_usec, pcpui_tchain);
set_core_timer(rel_usec, FALSE);
- /* Make sure the caller is setting the right tchain */
- assert(pcpui_tchain == tchain);
} else {
/* Disarm */
set_core_timer(0, FALSE);
/* Debug helpers */
-/* Disable irqs before calling this, or otherwise protect yourself. */
void print_chain(struct timer_chain *tchain)
{
struct alarm_waiter *i;
- printk("Chain %08p is%s empty, early: %llu latest: %llu\n", tchain,
+ spin_lock_irqsave(&tchain->lock);
+ printk("Chain %p is%s empty, early: %llu latest: %llu\n", tchain,
TAILQ_EMPTY(&tchain->waiters) ? "" : " not",
tchain->earliest_time,
tchain->latest_time);
TAILQ_FOREACH(i, &tchain->waiters, next) {
struct kthread *kthread = TAILQ_FIRST(&i->sem.waiters);
- printk("\tWaiter %08p, time: %llu, kthread: %08p (%08p)\n", i,
- i->wake_up_time, kthread, (kthread ? kthread->proc : 0));
+ printk("\tWaiter %p, time: %llu, kthread: %p (%p) %s\n", i,
+ i->wake_up_time, kthread, (kthread ? kthread->proc : 0),
+ (kthread ? kthread->name : ""));
}
+ spin_unlock_irqsave(&tchain->lock);
}
/* Prints all chains, rather verbosely */
void print_pcpu_chains(void)
{
struct timer_chain *pcpu_chain;
- int8_t irq_state = 0;
printk("PCPU Chains: It is now %llu\n", read_tsc());
- disable_irqsave(&irq_state);
for (int i = 0; i < num_cpus; i++) {
pcpu_chain = &per_cpu_info[i].tchain;
print_chain(pcpu_chain);
}
- enable_irqsave(&irq_state);
}