*
* The static inlines are defined farther down in the file (as always). */
-#ifndef ROS_KERN_ATOMIC_H
-#define ROS_KERN_ATOMIC_H
+#pragma once
#include <ros/common.h>
#include <ros/atomic.h>
/* Spin locks */
struct spinlock {
- volatile uint32_t RACY rlock;
+ volatile uint32_t rlock;
#ifdef CONFIG_SPINLOCK_DEBUG
uintptr_t call_site;
uint32_t calling_core;
extern inline bool spin_locked(spinlock_t *lock);
extern inline void __spin_lock(spinlock_t *lock);
extern inline void __spin_unlock(spinlock_t *lock);
-extern inline void spinlock_debug(spinlock_t *lock);
/* So we can inline a __spin_lock if we want. Even though we don't need this
* if we're debugging, it's helpful to keep the include at the same place for
#ifdef CONFIG_SPINLOCK_DEBUG
/* Arch indep, in k/s/atomic.c */
void spin_lock(spinlock_t *lock);
+bool spin_trylock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
+void spinlock_debug(spinlock_t *lock);
#else
/* Just inline the arch-specific __ versions */
__spin_lock(lock);
}
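+/* Returns TRUE if we grabbed the lock, FALSE otherwise; unlike spin_lock(),
+ * this never spins waiting for the holder. */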
+static inline bool spin_trylock(spinlock_t *lock)
+{
+ return __spin_trylock(lock);
+}
+
static inline void spin_unlock(spinlock_t *lock)
{
__spin_unlock(lock);
}
+static inline void spinlock_debug(spinlock_t *lock)
+{
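+	/* Nothing to report when lock debugging is compiled out. */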
+}
+
#endif /* CONFIG_SPINLOCK_DEBUG */
/* Inlines, defined below */
static inline void spinlock_init(spinlock_t *lock);
static inline void spinlock_init_irqsave(spinlock_t *lock);
static inline void spin_lock_irqsave(spinlock_t *lock);
+static inline bool spin_trylock_irqsave(spinlock_t *lock);
static inline void spin_unlock_irqsave(spinlock_t *lock);
static inline bool spin_lock_irq_enabled(spinlock_t *lock);
};
void poke(struct poke_tracker *tracker, void *arg);
+static inline void poke_init(struct poke_tracker *tracker, void (*func)(void*))
+{
+ tracker->need_to_run = 0;
+ tracker->run_in_progress = 0;
+ tracker->func = func;
+}
+#define POKE_INITIALIZER(f) {.func = f}
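+/* Rough usage sketch (the tracker and handler names below are made up):
+ *
+ *	static void __run_mgmt(void *arg);
+ *	static struct poke_tracker mgmt_poke = POKE_INITIALIZER(__run_mgmt);
+ *	...
+ *	poke(&mgmt_poke, arg);
+ *
+ * Callers poke() whenever the work needs to (re)run; judging by the
+ * need_to_run / run_in_progress flags, concurrent pokes are meant to coalesce
+ * rather than run the func in parallel. */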
+
/* Arch-specific implementations / declarations go here */
#include <arch/atomic.h>
typedef struct checklist_mask {
// only need a uint8_t, but we need the bits[] to be word aligned
uint32_t size;
- volatile uint8_t (COUNT(BYTES_FOR_BITMASK(size)) bits)[MAX_NUM_CPUS];
+ volatile uint8_t bits[MAX_NUM_CORES];
} checklist_mask_t;
// mask contains an unspecified array, so it needs to be at the bottom
// eagle-eyed readers may know why this might have been needed. 2009-09-04
//volatile uint8_t (COUNT(BYTES_FOR_BITMASK(size)) bits)[];
};
-typedef struct checklist RACY checklist_t;
+typedef struct checklist checklist_t;
#define ZEROS_ARRAY(size) {[0 ... ((size)-1)] 0}
int release_checklist(checklist_t* list);
int checklist_is_locked(checklist_t* list);
int checklist_is_clear(checklist_t* list);
+int checklist_is_full(checklist_t* list);
void reset_checklist(checklist_t* list);
void down_checklist(checklist_t* list);
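+/* Rough flow, as far as these declarations go: the initiator marks the cores
+ * it's waiting on in the checklist's mask, each target core calls
+ * down_checklist() when it's done, and checklist_is_clear() reports when
+ * everyone has checked in. */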
// TODO - do we want to adjust the size? (YES, don't want to check it all)
volatile uint8_t ready;
};
-typedef struct barrier RACY barrier_t;
+typedef struct barrier barrier_t;
-void init_barrier(barrier_t*COUNT(1) barrier, uint32_t count);
+void init_barrier(barrier_t *barrier, uint32_t count);
void reset_barrier(barrier_t* barrier);
void waiton_barrier(barrier_t* barrier);
// If ints are enabled, disable them and note it in the top bit of the lock
// There is an assumption about releasing locks in order here...
-static inline void spin_lock_irqsave(spinlock_t *SAFE lock)
+static inline void spin_lock_irqsave(spinlock_t *lock)
{
uint32_t irq_en;
irq_en = irq_is_enabled();
lock->rlock |= SPINLOCK_IRQ_EN;
}
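+/* Trylock version of the above: IRQs are disabled, the lock is attempted
+ * exactly once, and the IRQ state is restored if we fail.  A rough use (the
+ * lock name is illustrative):
+ *
+ *	if (spin_trylock_irqsave(&dev_lock)) {
+ *		... critical section ...
+ *		spin_unlock_irqsave(&dev_lock);
+ *	}
+ */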
+static inline bool spin_trylock_irqsave(spinlock_t *lock)
+{
+ uint32_t irq_en = irq_is_enabled();
+
+ disable_irq();
+ if (!spin_trylock(lock)) {
+ if (irq_en)
+ enable_irq();
+ return FALSE;
+ }
+ if (irq_en)
+ lock->rlock |= SPINLOCK_IRQ_EN;
+ return TRUE;
+}
+
// if the high bit of the lock is set, then re-enable interrupts
// (note from asw: you're lucky this works, you little-endian jerks)
-static inline void spin_unlock_irqsave(spinlock_t *SAFE lock)
+static inline void spin_unlock_irqsave(spinlock_t *lock)
{
if (spin_lock_irq_enabled(lock)) {
spin_unlock(lock);
/* Returns whether unlocking this lock should re-enable interrupts.  Is
 * meaningless on locks that weren't locked with irqsave. */
-static inline bool spin_lock_irq_enabled(spinlock_t *SAFE lock)
+static inline bool spin_lock_irq_enabled(spinlock_t *lock)
{
return lock->rlock & SPINLOCK_IRQ_EN;
}
{
return seqctr_retry(lock->r_ctr, ctr);
}
-
-#endif /* ROS_KERN_ATOMIC_H */