/* Copyright (c) 2009-2011 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel atomics and locking functions.
 *
 * The extern inline declarations are for arch-dependent functions. We do this
 * so that each arch can either static inline or just have a regular function,
 * whichever is appropriate. The actual implementation usually will be in
 * arch/atomic.h (for inlines).
 *
 * The static inlines are defined farther down in the file (as always). */
#ifndef ROS_KERN_ATOMIC_H
#define ROS_KERN_ATOMIC_H

#include <ros/common.h>
#include <ros/atomic.h>
#include <arch/arch.h>
extern inline void atomic_init(atomic_t *number, long val);
extern inline long atomic_read(atomic_t *number);
extern inline void atomic_set(atomic_t *number, long val);
extern inline void atomic_add(atomic_t *number, long val);
extern inline void atomic_inc(atomic_t *number);
extern inline void atomic_dec(atomic_t *number);
extern inline long atomic_fetch_and_add(atomic_t *number, long val);
extern inline void atomic_and(atomic_t *number, long mask);
extern inline void atomic_or(atomic_t *number, long mask);
extern inline long atomic_swap(atomic_t *addr, long val);
extern inline bool atomic_cas(atomic_t *addr, long exp_val, long new_val);
extern inline bool atomic_cas_ptr(void **addr, void *exp_val, void *new_val);
extern inline bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val,
                                  uint32_t new_val);
extern inline bool atomic_add_not_zero(atomic_t *number, long val);
extern inline bool atomic_sub_and_test(atomic_t *number, long val);
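
/* Typical compare-and-swap usage is a read/compute/retry loop. A minimal
 * sketch, using only the declarations above (the helper name and the
 * "double it" update are made up for illustration):
 *
 *	static void atomic_double(atomic_t *number)
 *	{
 *		long old;
 *		do {
 *			old = atomic_read(number);
 *		} while (!atomic_cas(number, old, 2 * old));
 *	}
 */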
struct spinlock {
	volatile uint32_t RACY rlock;
#ifdef __CONFIG_SPINLOCK_DEBUG__
	uint32_t calling_core;
#endif
};
typedef struct spinlock spinlock_t;
#define SPINLOCK_INITIALIZER {0}
extern inline void spinlock_init(spinlock_t *lock);
extern inline bool spin_locked(spinlock_t *lock);
extern inline void spin_lock(spinlock_t *lock);
extern inline void spin_unlock(spinlock_t *lock);
extern inline void spinlock_debug(spinlock_t *lock);

static inline void spin_lock_irqsave(spinlock_t *lock);
static inline void spin_unlock_irqsave(spinlock_t *lock);
static inline bool spin_lock_irq_enabled(spinlock_t *lock);
/* Hash locks (array of spinlocks). Most all users will want the default one,
 * so point your pointer to one of them, though you could always kmalloc a
 * bigger one. In the future, they might be growable, etc, which init code may
 * care about. */
struct hashlock {
	unsigned int nr_entries;
	struct spinlock locks[];
};
#define HASHLOCK_DEFAULT_SZ 53		/* nice prime, might be a bit large */
struct small_hashlock {
	unsigned int nr_entries;
	struct spinlock locks[HASHLOCK_DEFAULT_SZ];
};
void hashlock_init(struct hashlock *hl, unsigned int nr_entries);
void hash_lock(struct hashlock *hl, long key);
void hash_unlock(struct hashlock *hl, long key);
void hash_lock_irqsave(struct hashlock *hl, long key);
void hash_unlock_irqsave(struct hashlock *hl, long key);
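
/* Usage sketch for the hash locks (the variable names here are made up):
 * point a struct hashlock pointer at a real lock array, init it once, then
 * lock/unlock with whatever key identifies the object you are serializing on.
 *
 *	static struct small_hashlock real_locks;
 *	static struct hashlock *my_locks = (struct hashlock*)&real_locks;
 *
 *	hashlock_init(my_locks, HASHLOCK_DEFAULT_SZ);
 *	...
 *	hash_lock(my_locks, key);
 *	// critical section for objects hashing to 'key'
 *	hash_unlock(my_locks, key);
 */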
/* An example seq lock, built from the counter. I don't particularly like this,
 * since it forces you to use a specific locking type. */
typedef struct seq_lock {
	spinlock_t w_lock;
	seq_ctr_t r_ctr;
} seqlock_t;

static inline void __seq_start_write(seq_ctr_t *seq_ctr);
static inline void __seq_end_write(seq_ctr_t *seq_ctr);
static inline void write_seqlock(seqlock_t *lock);
static inline void write_sequnlock(seqlock_t *lock);
static inline seq_ctr_t read_seqbegin(seqlock_t *lock);
static inline bool read_seqretry(seqlock_t *lock, seq_ctr_t ctr);
/* Arch-specific implementations / declarations go here */
#include <arch/atomic.h>
#define MAX_SPINS 1000000000

/* Will spin for a little while, but not deadlock if it never happens */
#define spin_on(x)                                                             \
	for (int i = 0; (x); i++) {                                                \
		cpu_relax();                                                           \
		if (i == MAX_SPINS) {                                                  \
			printk("Probably timed out/failed.\n");                            \
			break;                                                             \
		}                                                                      \
	}
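
/* For example, to wait (bounded) for another core to set a flag ('the_flag'
 * is just a placeholder):
 *
 *	spin_on(!the_flag);
 */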
/*********************** Checklist stuff **********************/
typedef struct checklist_mask {
	// only need a uint8_t, but we need the bits[] to be word aligned
	uint32_t size;
	volatile uint8_t (COUNT(BYTES_FOR_BITMASK(size)) bits)[MAX_NUM_CPUS];
} checklist_mask_t;

// mask contains an unspecified array, so it needs to be at the bottom
struct checklist {
	spinlock_t lock;
	checklist_mask_t mask;
	// eagle-eyed readers may know why this might have been needed. 2009-09-04
	//volatile uint8_t (COUNT(BYTES_FOR_BITMASK(size)) bits)[];
};
typedef struct checklist RACY checklist_t;

#define ZEROS_ARRAY(size) {[0 ... ((size)-1)] 0}

#define DEFAULT_CHECKLIST_MASK(sz) {(sz), ZEROS_ARRAY(BYTES_FOR_BITMASK(sz))}
#define DEFAULT_CHECKLIST(sz) {SPINLOCK_INITIALIZER, DEFAULT_CHECKLIST_MASK(sz)}
#define INIT_CHECKLIST(nm, sz)	\
	checklist_t nm = DEFAULT_CHECKLIST(sz);
#define INIT_CHECKLIST_MASK(nm, sz)	\
	checklist_mask_t nm = DEFAULT_CHECKLIST_MASK(sz);
int commit_checklist_wait(checklist_t* list, checklist_mask_t* mask);
int commit_checklist_nowait(checklist_t* list, checklist_mask_t* mask);
int waiton_checklist(checklist_t* list);
int release_checklist(checklist_t* list);
int checklist_is_locked(checklist_t* list);
int checklist_is_clear(checklist_t* list);
void reset_checklist(checklist_t* list);
void down_checklist(checklist_t* list);
// TODO - do we want to adjust the size? (YES, don't want to check it all)
// TODO - do we want to be able to call waiton without having called commit?
//        - in the case of protected checklists
// TODO - want a destroy checklist (when we have kmalloc, or whatever)
// TODO - some sort of dynamic allocation of them in the future
// TODO - think about deadlock issues with one core spinning on a lock for
//        something that it is the hold out for...
//        - probably should have interrupts enabled, and never grab these locks
//          from interrupt context (and not use irq_save)
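// Rough sketch of the intended flow, inferred from the prototypes above (the
// names and the "send work" step are illustrative, not a spec):
//
//	INIT_CHECKLIST(work_cl, MAX_NUM_CPUS);
//
//	// initiator: commit the mask of cores that must respond, poke them,
//	// then wait for all of them to check in
//	commit_checklist_wait(&work_cl, &work_mask);
//	// ... send the work (e.g. an IPI) to the cores set in work_mask ...
//	waiton_checklist(&work_cl);
//
//	// each target core, once it has finished the work:
//	down_checklist(&work_cl);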
/**************************************************************/
/* Barrier: currently made for everyone barriering. Change to use checklist */
struct barrier {
	spinlock_t lock;
	uint32_t init_count;
	uint32_t current_count;
	volatile uint8_t ready;
};

typedef struct barrier RACY barrier_t;

void init_barrier(barrier_t*COUNT(1) barrier, uint32_t count);
void reset_barrier(barrier_t* barrier);
void waiton_barrier(barrier_t* barrier);
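
/* Typical use ('num_cores' stands in for however many participants there are):
 *
 *	barrier_t all_cores_b;
 *	init_barrier(&all_cores_b, num_cores);
 *	...
 *	// on every participating core:
 *	waiton_barrier(&all_cores_b);	// returns once everyone has arrived
 */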
/* Spinlock bit flags */
#define SPINLOCK_IRQ_EN			0x80000000

// If ints are enabled, disable them and note it in the top bit of the lock
// There is an assumption about releasing locks in order here...
static inline void spin_lock_irqsave(spinlock_t *SAFE lock)
{
	uint32_t irq_en;
	irq_en = irq_is_enabled();
	disable_irq();
	spin_lock(lock);
	if (irq_en)
		lock->rlock |= SPINLOCK_IRQ_EN;
}

// if the high bit of the lock is set, then re-enable interrupts
// (note from asw: you're lucky this works, you little-endian jerks)
static inline void spin_unlock_irqsave(spinlock_t *SAFE lock)
{
	if (spin_lock_irq_enabled(lock)) {
		spin_unlock(lock);
		enable_irq();
	} else
		spin_unlock(lock);
}

/* Returns whether unlocking this lock should re-enable interrupts.
 * Meaningless on locks that weren't locked with irqsave. */
static inline bool spin_lock_irq_enabled(spinlock_t *SAFE lock)
{
	return lock->rlock & SPINLOCK_IRQ_EN;
}
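
/* The "in order" assumption above means nested irqsave locks must be released
 * LIFO: only the lock that actually disabled interrupts carries the
 * SPINLOCK_IRQ_EN bit. E.g., with two hypothetical locks a and b:
 *
 *	spin_lock_irqsave(&a);		// irqs were on: disables them, marks a
 *	spin_lock_irqsave(&b);		// irqs already off: b is not marked
 *	...
 *	spin_unlock_irqsave(&b);	// irqs stay off
 *	spin_unlock_irqsave(&a);	// re-enables irqs
 */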
/* Note, the seq_ctr is not a full seq lock - just the counter guts. Write
 * access can be controlled by another lock (like the proc-lock). start_ and
 * end_write are the writer's responsibility to signal the readers of a
 * concurrent write. */
static inline void __seq_start_write(seq_ctr_t *seq_ctr)
{
#ifdef __CONFIG_SEQLOCK_DEBUG__
	assert(*seq_ctr % 2 == 0);
#endif
	(*seq_ctr)++;
	/* We're the only writer, so we need to prevent the compiler (and some
	 * arches) from reordering writes before this point. */
	wmb();
}

static inline void __seq_end_write(seq_ctr_t *seq_ctr)
{
#ifdef __CONFIG_SEQLOCK_DEBUG__
	assert(*seq_ctr % 2 == 1);
#endif
	/* Need to prevent the compiler (and some arches) from reordering older
	 * writes past the counter increment. */
	wmb();
	(*seq_ctr)++;
}
/* Untested reference implementation of a seq lock. As mentioned above, we
 * might need a variety of these (for instance, this doesn't do an irqsave). Or
 * there may be other invariants that we need the lock to protect. */
static inline void write_seqlock(seqlock_t *lock)
{
	spin_lock(&lock->w_lock);
	__seq_start_write(&lock->r_ctr);
}

static inline void write_sequnlock(seqlock_t *lock)
{
	__seq_end_write(&lock->r_ctr);
	spin_unlock(&lock->w_lock);
}

static inline seq_ctr_t read_seqbegin(seqlock_t *lock)
{
	seq_ctr_t retval = lock->r_ctr;
	rmb();	/* don't want future reads to come before our ctr read */
	return retval;
}

static inline bool read_seqretry(seqlock_t *lock, seq_ctr_t ctr)
{
	return seqctr_retry(lock->r_ctr, ctr);
}
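
/* Reader-side pattern for the seq lock ('the_lock', 'data', and 'snapshot'
 * are placeholders):
 *
 *	seq_ctr_t ctr;
 *	do {
 *		ctr = read_seqbegin(&the_lock);
 *		snapshot = data;	// read the protected state
 *	} while (read_seqretry(&the_lock, ctr));
 *
 * Writers just bracket their updates with write_seqlock()/write_sequnlock(). */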

#endif /* ROS_KERN_ATOMIC_H */