akaros/kern/src/rwlock.c
/* Copyright (c) 2013 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Reader-writer queue locks (sleeping locks).
 *
 * We favor readers when reading, meaning new readers can move ahead of
 * writers.  Ex: if I have some readers, then a writer, clearly the writer
 * blocks.  If more readers come in, they can just come in, and the presence
 * of the writer doesn't stop them.
 *
 * You get potential writer starvation, but you also get the property that if
 * a thread holds a read-lock, that thread can grab the same reader lock
 * again.  A more general statement would be "if some reader holds an rwlock,
 * then any other thread (including itself) can get an rlock".
 *
 * Similarly, writers favor other writers.  So if a writer is unlocking, it'll
 * pass the lock to another writer first.  Here, there is potential reader
 * starvation.
 *
 * We also pass locks, instead of letting recently woken threads fight for
 * them.  In the case of a reader wakeup, we know that they all will wake up
 * and read.  Instead of having them fight for a lock and then incref, the
 * waker (the last writer) will up the count and just wake everyone.
 *
 * This also helps when a writer wants to favor another writer.  If we didn't
 * pass the lock, then a new reader could squeeze in after our old writer
 * signalled the new writer.  Even worse, in this case, the readers that we
 * didn't wake up are still sleeping, even though a reader now holds the lock.
 * It won't deadlock (since eventually the reader will wake the writer, who
 * wakes the old readers), but it breaks the notion of an RW lock a bit. */

#include <rwlock.h>
#include <atomic.h>
#include <kthread.h>

void rwinit(struct rwlock *rw_lock)
{
	spinlock_init(&rw_lock->lock);
	atomic_init(&rw_lock->nr_readers, 0);
	rw_lock->writing = FALSE;
	cv_init_with_lock(&rw_lock->readers, &rw_lock->lock);
	cv_init_with_lock(&rw_lock->writers, &rw_lock->lock);
}

void rlock(struct rwlock *rw_lock)
{
	/* If we already have a reader, we can just increment and return.  This
	 * is the only access to nr_readers outside the lock.  All locked uses
	 * need to be aware that the nr could be concurrently increffed (unless
	 * it is 0). */
	if (atomic_add_not_zero(&rw_lock->nr_readers, 1))
		return;
	/* Here's an alternate style: the broadcaster (a writer) will up the
	 * readers count and just wake us.  All readers just proceed, instead of
	 * fighting to lock and up the count.  The writer 'passed' the rlock to
	 * us. */
	spin_lock(&rw_lock->lock);
	if (rw_lock->writing) {
		cv_wait_and_unlock(&rw_lock->readers);
		return;
	}
	atomic_inc(&rw_lock->nr_readers);
	spin_unlock(&rw_lock->lock);
}

/* Non-blocking attempt at a read lock.  Returns TRUE if we got the rlock (the
 * caller must runlock later), FALSE if a writer currently holds the lock. */
bool canrlock(struct rwlock *rw_lock)
{
	if (atomic_add_not_zero(&rw_lock->nr_readers, 1))
		return TRUE;
	spin_lock(&rw_lock->lock);
	if (rw_lock->writing) {
		spin_unlock(&rw_lock->lock);
		return FALSE;
	}
	atomic_inc(&rw_lock->nr_readers);
	spin_unlock(&rw_lock->lock);
	return TRUE;
}

void runlock(struct rwlock *rw_lock)
{
	spin_lock(&rw_lock->lock);
	/* sub and test will tell us if we got the refcnt to 0, atomically,
	 * syncing with the atomic_add_not_zero of new readers.  Since we're
	 * passing the lock, we need to make sure someone is sleeping.  Contrast
	 * with wunlock, where we can just blindly broadcast and add
	 * (potentially == 0). */
	if (atomic_sub_and_test(&rw_lock->nr_readers, 1) &&
	    rw_lock->writers.nr_waiters) {
		/* passing the lock to the one writer we signal. */
		rw_lock->writing = TRUE;
		__cv_signal(&rw_lock->writers);
	}
	spin_unlock(&rw_lock->lock);
}

void wlock(struct rwlock *rw_lock)
{
	spin_lock(&rw_lock->lock);
	if (atomic_read(&rw_lock->nr_readers) || rw_lock->writing) {
		/* If we slept, the lock was passed to us */
		cv_wait_and_unlock(&rw_lock->writers);
		return;
	}
	rw_lock->writing = TRUE;
	spin_unlock(&rw_lock->lock);
}

void wunlock(struct rwlock *rw_lock)
{
	/* Pass the lock to another writer (we leave writing = TRUE) */
	spin_lock(&rw_lock->lock);
	if (rw_lock->writers.nr_waiters) {
		/* Just waking one */
		__cv_signal(&rw_lock->writers);
		spin_unlock(&rw_lock->lock);
		return;
	}
	rw_lock->writing = FALSE;
	/* No writers waiting: pass the rlock to all sleeping readers by upping
	 * the count for them (possibly 0) before broadcasting. */
	atomic_set(&rw_lock->nr_readers, rw_lock->readers.nr_waiters);
	__cv_broadcast(&rw_lock->readers);
	spin_unlock(&rw_lock->lock);
}
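
/* The block below is an illustrative usage sketch, not part of the original
 * Akaros source, and is fenced off with #if 0 so it does not affect the
 * build.  It shows the property described in the header comment: a thread
 * already holding a read lock can take the same rlock again via the lockless
 * fast path, while writers get exclusive access.  The names example_rwlock,
 * example_counter, example_init, example_reader, example_try_read, and
 * example_writer are hypothetical and exist only for this sketch; real kernel
 * code would typically embed the rwlock in its own structure. */
#if 0	/* usage sketch only */
static struct rwlock example_rwlock;
static int example_counter;

static void example_init(void)
{
	/* Must run once before any lock/unlock calls. */
	rwinit(&example_rwlock);
}

static void example_reader(void)
{
	rlock(&example_rwlock);
	/* Re-acquiring while held: atomic_add_not_zero() sees nr_readers > 0
	 * and succeeds without taking the spinlock or sleeping. */
	rlock(&example_rwlock);
	(void)example_counter;		/* read-side work goes here */
	runlock(&example_rwlock);
	runlock(&example_rwlock);
}

static bool example_try_read(void)
{
	/* Non-blocking variant: bail out instead of sleeping on a writer. */
	if (!canrlock(&example_rwlock))
		return FALSE;
	(void)example_counter;		/* read-side work goes here */
	runlock(&example_rwlock);
	return TRUE;
}

static void example_writer(void)
{
	wlock(&example_rwlock);
	example_counter++;		/* exclusive write-side work */
	wunlock(&example_rwlock);
}
#endif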