akaros/tests/linux-lock-hacks.h
/* basic locking code that compiles on linux.  #included directly into
 * lock_test.  It's a .h so that make tests doesn't build it. */

#define ARCH_CL_SIZE 64
#define SPINLOCK_INITIALIZER {FALSE}

typedef struct {
        bool locked;
} spinlock_t;

void __attribute__((noinline)) spinlock_init(spinlock_t *lock)
{
        lock->locked = FALSE;
}

/* Returns TRUE if we grabbed the lock */
bool __attribute__((noinline)) spinlock_trylock(spinlock_t *lock)
{
        if (lock->locked)
                return FALSE;
        return !__sync_lock_test_and_set(&lock->locked, TRUE);
}

void __attribute__((noinline)) spinlock_lock(spinlock_t *lock)
{
        while (!spinlock_trylock(lock))
                cpu_relax();
}

void __attribute__((noinline)) spinlock_unlock(spinlock_t *lock)
{
        __sync_lock_release(&lock->locked, FALSE);
}

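/* Illustrative usage sketch (not part of the original file): one plausible
 * way a lock_test-style worker could exercise the spinlock above.  The names
 * ex_spin_lock, ex_spin_counter, and ex_spin_worker are hypothetical; the
 * block is kept under #if 0 so it does not affect the lock_test build. */
#if 0
static spinlock_t ex_spin_lock = SPINLOCK_INITIALIZER;
static unsigned long ex_spin_counter;

static void ex_spin_worker(unsigned long loops)
{
        for (unsigned long i = 0; i < loops; i++) {
                spinlock_lock(&ex_spin_lock);
                ex_spin_counter++;      /* critical section */
                spinlock_unlock(&ex_spin_lock);
        }
}
#endif
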
#define MCS_LOCK_INIT {0}
#define MCS_QNODE_INIT {0, 0}

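/* Each qnode is padded out to a cache line (ARCH_CL_SIZE) so that waiters
 * spinning on their own qnode->locked flag don't false-share a line with
 * other waiters. */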
typedef struct mcs_lock_qnode
{
        struct mcs_lock_qnode *next;
        int locked;
}__attribute__((aligned(ARCH_CL_SIZE))) mcs_lock_qnode_t;

/* included for the dummy init in lock_thread */
struct mcs_pdro_qnode
{
        struct mcs_pdro_qnode *next;
        int locked;
        uint32_t vcoreid;
}__attribute__((aligned(ARCH_CL_SIZE)));

#define MCSPDRO_QNODE_INIT {0, 0, 0}
#define mcs_pdr_init(args...) {}

typedef struct mcs_lock
{
        mcs_lock_qnode_t *lock;
} mcs_lock_t;

void __attribute__((noinline)) mcs_lock_init(struct mcs_lock *lock)
{
        memset(lock, 0, sizeof(mcs_lock_t));
}

static inline mcs_lock_qnode_t *mcs_qnode_swap(mcs_lock_qnode_t **addr,
                                               mcs_lock_qnode_t *val)
{
        return (mcs_lock_qnode_t*) __sync_lock_test_and_set((void**)addr, val);
}

void __attribute__((noinline))
mcs_lock_lock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        qnode->next = 0;
        cmb();  /* swap provides a CPU mb() */
        mcs_lock_qnode_t *predecessor = mcs_qnode_swap(&lock->lock, qnode);
        if (predecessor) {
                qnode->locked = 1;
                wmb();
                predecessor->next = qnode;
                /* no need for a wrmb(), since this will only get unlocked
                 * after they read our previous write */
                while (qnode->locked)
                        cpu_relax();
        }
        cmb();  /* just need a cmb, the swap handles the CPU wmb/wrmb() */
}

void __attribute__((noinline))
mcs_lock_unlock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        /* Check if someone is already waiting on us to unlock */
        if (qnode->next == 0) {
                cmb(); /* no need for CPU mbs, since there's an atomic_swap() */
                /* Unlock it */
                mcs_lock_qnode_t *old_tail = mcs_qnode_swap(&lock->lock, 0);
                /* no one else was already waiting, so we successfully unlocked
                 * and can return */
                if (old_tail == qnode)
                        return;
                /* someone else was already waiting on the lock (last one on
                 * the list), and we accidentally took them off.  Try and put
                 * it back. */
                mcs_lock_qnode_t *usurper = mcs_qnode_swap(&lock->lock,
                                                           old_tail);
                /* since someone else was waiting, they should have made
                 * themselves our next.  Spin (very briefly!) til it
                 * happens. */
                while (qnode->next == 0)
                        cpu_relax();
                if (usurper) {
                        /* a usurper is someone who snuck in before we could
                         * put the old tail back.  They now have the lock.
                         * Let's put whoever is supposed to be next as their
                         * next one. */
                        usurper->next = qnode->next;
                } else {
                        /* No usurper meant we put things back correctly, so we
                         * should just pass the lock / unlock whoever is next */
                        qnode->next->locked = 0;
                }
        } else {
                /* mb()s necessary since we didn't call an atomic_swap() */
                /* need to make sure any previous writes don't pass unlocking */
                wmb();
                /* need to make sure any reads happen before the unlocking */
                rwmb();
                /* simply unlock whoever is next */
                qnode->next->locked = 0;
        }
}

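/* Illustrative usage sketch (not part of the original file): one plausible
 * way a lock_test-style worker could drive the MCS lock above.  Each
 * acquirer passes its own qnode, typically stack-allocated, and the same
 * qnode must be handed back to unlock.  The names ex_mcs_lock,
 * ex_mcs_counter, and ex_mcs_worker are hypothetical; the block is kept
 * under #if 0 so it does not affect the lock_test build. */
#if 0
static mcs_lock_t ex_mcs_lock = MCS_LOCK_INIT;
static unsigned long ex_mcs_counter;

static void ex_mcs_worker(unsigned long loops)
{
        struct mcs_lock_qnode qnode = MCS_QNODE_INIT;

        for (unsigned long i = 0; i < loops; i++) {
                mcs_lock_lock(&ex_mcs_lock, &qnode);
                ex_mcs_counter++;       /* critical section */
                mcs_lock_unlock(&ex_mcs_lock, &qnode);
        }
}
#endif
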
/* CAS style mcs locks, kept around til we use them.  We're using the
 * usurper-style, since RISCV doesn't have a real CAS (yet?). */
void __attribute__((noinline))
mcs_lock_unlock_cas(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        /* Check if someone is already waiting on us to unlock */
        if (qnode->next == 0) {
                cmb();  /* no need for CPU mbs, since there's an atomic_cas() */
                /* If we're still the lock, just swap it with 0 (unlock) and
                 * return */
                if (__sync_bool_compare_and_swap((void**)&lock->lock, qnode, 0))
                        return;
                /* We failed, someone is there and we are some (maybe a
                 * different) thread's pred.  Since someone else was waiting,
                 * they should have made themselves our next.  Spin (very
                 * briefly!) til it happens. */
                while (qnode->next == 0)
                        cpu_relax();
                /* Alpha wants a read_barrier_depends() here */
                /* Now that we have a next, unlock them */
                qnode->next->locked = 0;
        } else {
                /* mb()s necessary since we didn't call an atomic_swap() */
                /* need to make sure any previous writes don't pass unlocking */
                wmb();
                /* need to make sure any reads happen before the unlocking */
                rwmb();
                /* simply unlock whoever is next */
                qnode->next->locked = 0;
        }
}