/* Copyright (c) 2013 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Reader-writer queue locks (sleeping locks).
 *
 * We favor readers when reading, meaning new readers can move ahead of
 * writers.  Ex: If I have some readers, then a writer, clearly the writer
 * blocks.  If more readers come in, they can just come in and the presence of
 * the writer doesn't matter.
 *
 * You get potential writer starvation, but you also get the property that if
 * a thread holds a read-lock, that thread can grab the same reader lock
 * again.  A more general statement would be "if some reader holds a rwlock,
 * then any other thread (including itself) can get an rlock".
 *
 * Similarly, writers favor other writers.  So if a writer is unlocking, it'll
 * pass the lock to another writer first.  Here, there is potential reader
 * starvation.
 *
 * We also pass locks, instead of letting recently woken threads fight for
 * them.  In the case of a reader wakeup, we know that they all will wake up
 * and read.  Instead of having them fight for a lock and then incref, the
 * waker (the last writer) will up the count and just wake everyone.
 *
 * This also helps when a writer wants to favor another writer.  If we didn't
 * pass the lock, then a new reader could squeeze in after our old writer
 * signalled the new writer.  Even worse, in this case, the readers that we
 * didn't wake up are still sleeping, even though a reader now holds the lock.
 * It won't deadlock (since eventually the reader will wake the writer, who
 * wakes the old readers), but it breaks the notion of a RW lock a bit. */
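/* A minimal usage sketch, not part of the original file: 'foo_rwlock' and the
 * critical sections are hypothetical, and only the functions defined below
 * are used.  Readers can hold the lock concurrently (and may re-acquire a
 * read-lock they already hold); writers get exclusive access:
 *
 *	struct rwlock foo_rwlock;
 *
 *	rwinit(&foo_rwlock);
 *
 *	// Reader path: may run alongside other readers.
 *	rlock(&foo_rwlock);
 *	// ... read shared state ...
 *	runlock(&foo_rwlock);
 *
 *	// Writer path: excludes both readers and other writers.
 *	wlock(&foo_rwlock);
 *	// ... modify shared state ...
 *	wunlock(&foo_rwlock);
 */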
#include <rwlock.h>	/* assumed header declaring struct rwlock and the spinlock/cv helpers */

void rwinit(struct rwlock *rw_lock)
{
	spinlock_init(&rw_lock->lock);
	atomic_init(&rw_lock->nr_readers, 0);
	rw_lock->writing = FALSE;
	cv_init_with_lock(&rw_lock->readers, &rw_lock->lock);
	cv_init_with_lock(&rw_lock->writers, &rw_lock->lock);
}
void rlock(struct rwlock *rw_lock)
{
	/* If we already have a reader, we can just increment and return.  This
	 * is the only access to nr_readers outside the lock.  All locked uses
	 * need to be aware that the nr could be concurrently increffed (unless
	 * it is 0). */
	if (atomic_add_not_zero(&rw_lock->nr_readers, 1))
		return;
	/* Here's an alternate style: the broadcaster (a writer) will up the
	 * readers count and just wake us.  All readers just proceed, instead of
	 * fighting to lock and up the count.  The writer 'passed' the rlock to
	 * us. */
	spin_lock(&rw_lock->lock);
	if (rw_lock->writing) {
		cv_wait_and_unlock(&rw_lock->readers);
		return;
	}
	atomic_inc(&rw_lock->nr_readers);
	spin_unlock(&rw_lock->lock);
}
bool canrlock(struct rwlock *rw_lock)
{
	if (atomic_add_not_zero(&rw_lock->nr_readers, 1))
		return TRUE;
	spin_lock(&rw_lock->lock);
	if (rw_lock->writing) {
		spin_unlock(&rw_lock->lock);
		return FALSE;
	}
	atomic_inc(&rw_lock->nr_readers);
	spin_unlock(&rw_lock->lock);
	return TRUE;
}
void runlock(struct rwlock *rw_lock)
{
	spin_lock(&rw_lock->lock);
	/* sub and test will tell us if we got the refcnt to 0, atomically,
	 * syncing with the atomic_add_not_zero of new readers.  Since we're
	 * passing the lock, we need to make sure someone is sleeping.  Contrast
	 * to the wunlock, where we can just blindly broadcast and add
	 * (potentially == 0). */
	if (atomic_sub_and_test(&rw_lock->nr_readers, 1) &&
	    rw_lock->writers.nr_waiters) {
		/* passing the lock to the one writer we signal. */
		rw_lock->writing = TRUE;
		__cv_signal(&rw_lock->writers);
	}
	spin_unlock(&rw_lock->lock);
}
void wlock(struct rwlock *rw_lock)
{
	spin_lock(&rw_lock->lock);
	if (atomic_read(&rw_lock->nr_readers) || rw_lock->writing) {
		/* If we slept, the lock was passed to us */
		cv_wait_and_unlock(&rw_lock->writers);
		return;
	}
	rw_lock->writing = TRUE;
	spin_unlock(&rw_lock->lock);
}
void wunlock(struct rwlock *rw_lock)
{
	/* Pass the lock to another writer (we leave writing = TRUE) */
	spin_lock(&rw_lock->lock);
	if (rw_lock->writers.nr_waiters) {
		/* Just waking one */
		__cv_signal(&rw_lock->writers);
		spin_unlock(&rw_lock->lock);
		return;
	}
	rw_lock->writing = FALSE;
	atomic_set(&rw_lock->nr_readers, rw_lock->readers.nr_waiters);
	__cv_broadcast(&rw_lock->readers);
	spin_unlock(&rw_lock->lock);
}