/* Copyright (c) 2013 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * Kevin Klues <klueska@cs.berkeley.edu>
 *
 * Spinlocks and Spin-PDR locks (preemption detection/recovery)
 *
 * This file is part of Parlib.
 *
 * Parlib is free software: you can redistribute it and/or modify
 * it under the terms of the Lesser GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Parlib is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * Lesser GNU General Public License for more details.
 *
 * See COPYING.LESSER for details on the GNU Lesser General Public License.
 * See COPYING for details on the GNU General Public License. */
/* Headers assumed for the types and helpers used below (spinlock_t, EBUSY,
 * cpu_relax(), vcore_id(), uth_*_notifs()); exact paths may differ across
 * Parlib trees. */
#include <errno.h>
#include <spinlock.h>
#include <vcore.h>
#include <uthread.h>

void spinlock_init(spinlock_t *lock)
{
	lock->lock = 0;	/* 0 means unlocked */
}

int spinlock_trylock(spinlock_t *lock)
{
	/* Returns 0 (the old, unlocked value) on success, EBUSY otherwise. */
	return __sync_lock_test_and_set(&lock->lock, EBUSY);
}

void spinlock_lock(spinlock_t *lock)
{
	while (spinlock_trylock(lock))
		cpu_relax();
}

void spinlock_unlock(spinlock_t *lock)
{
	__sync_lock_release(&lock->lock, 0);
}
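/* Usage sketch (illustrative, not part of Parlib): a shared counter guarded
 * by the basic spinlock above.  The 'example_*' names are hypothetical, and
 * example_init() is assumed to run once before any example_inc() calls. */
#if 0
static spinlock_t example_lock;
static int example_count;

static void example_init(void)
{
	spinlock_init(&example_lock);
}

static void example_inc(void)
{
	spinlock_lock(&example_lock);
	example_count++;
	spinlock_unlock(&example_lock);
}
#endif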
/* Two different versions, with and without CAS.  Default is with CAS. */
#ifndef __CONFIG_SPINPDR_NO_CAS__ /* CAS version */
/* Spin-PDR locks (preemption detection/recovery).  Idea is to CAS and put the
 * lockholder's vcoreid in the lock, and all spinners ensure that vcore runs. */
void spin_pdr_init(struct spin_pdr_lock *pdr_lock)
{
	pdr_lock->lock = SPINPDR_UNLOCKED;
}
/* Internal version of the locking func, doesn't care if notifs are disabled */
void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
	uint32_t vcoreid = vcore_id();
	uint32_t lock_val;

	assert(vcoreid != SPINPDR_UNLOCKED);
	do {
		/* Spin until the lock looks free, ensuring the advertised
		 * lockholder (whatever vcoreid is in the lock word) runs. */
		while ((lock_val = pdr_lock->lock) != SPINPDR_UNLOCKED) {
			ensure_vcore_runs(lock_val);
			cmb();
		}
	} while (!atomic_cas_u32(&pdr_lock->lock, lock_val, vcoreid));
	cmb();	/* just need a cmb, the CAS handles the CPU wmb/wrmb() */
}
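/* For reference, atomic_cas_u32() is a boolean compare-and-swap: it
 * atomically replaces *addr with new_val iff *addr still equals old_val,
 * returning TRUE on success.  A minimal sketch in terms of the same GCC
 * builtins used by the basic spinlock above (the real Parlib version may be
 * arch-specific):
 *
 *	static inline bool atomic_cas_u32(uint32_t *addr, uint32_t old_val,
 *	                                  uint32_t new_val)
 *	{
 *		return __sync_bool_compare_and_swap(addr, old_val, new_val);
 *	}
 */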
void __spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
{
	/* could make an arch-dependent 'release barrier' out of these */
	wmb();	/* Need to prevent the compiler from reordering older stores. */
	rwmb();	/* And no old reads passing either.  x86 makes both mbs a cmb() */
	pdr_lock->lock = SPINPDR_UNLOCKED;
}
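/* In C11-atomics terms, this unlock is a store-release: no earlier loads or
 * stores may move past the write that frees the lock.  An equivalent sketch,
 * assuming the lock word were declared _Atomic (illustrative only; Parlib
 * uses its own barrier macros above):
 *
 *	atomic_store_explicit(&pdr_lock->lock, SPINPDR_UNLOCKED,
 *	                      memory_order_release);
 */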
#else /* NON-CAS version */
/* Using regular spinlocks, with SPINPDR_VCOREID_UNKNOWN (-1) meaning 'no
 * lockholder advertised yet'.  There are two spots where the lockholder still
 * holds the lock but hasn't advertised its vcoreid, and in those cases we
 * ensure all vcores aren't preempted (linear scan). */
void spin_pdr_init(struct spin_pdr_lock *pdr_lock)
{
	spinlock_init(&pdr_lock->spinlock);
	pdr_lock->lockholder = SPINPDR_VCOREID_UNKNOWN;
}
void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
	uint32_t vcoreid = vcore_id();
	uint32_t ensure_tgt;

	assert(vcoreid != SPINPDR_VCOREID_UNKNOWN);
	while (spinlock_trylock(&pdr_lock->spinlock)) {
		ensure_tgt = pdr_lock->lockholder;
		/* ensure will make sure *every* vcore runs if you pass it
		 * yourself. */
		if (ensure_tgt == SPINPDR_VCOREID_UNKNOWN)
			ensure_tgt = vcoreid;
		ensure_vcore_runs(ensure_tgt);
		cpu_relax();
	}
	pdr_lock->lockholder = vcoreid;
}
void __spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
{
	pdr_lock->lockholder = SPINPDR_VCOREID_UNKNOWN;
	spinlock_unlock(&pdr_lock->spinlock);
}
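/* For reference, this version assumes a two-field lock, pairing a plain
 * spinlock with the advertised lockholder.  A sketch of the layout implied by
 * the accesses above (the real definition lives in the spinlock header):
 *
 *	struct spin_pdr_lock {
 *		spinlock_t spinlock;
 *		uint32_t lockholder;
 *	};
 */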
#endif /* __CONFIG_SPINPDR_NO_CAS__ */
void spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
	/* Disable notifs, if we're an _M uthread */
	uth_disable_notifs();
	__spin_pdr_lock(pdr_lock);
}
void spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
{
	__spin_pdr_unlock(pdr_lock);
	/* Enable notifs, if we're an _M uthread */
	uth_enable_notifs();
}
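/* Usage sketch (illustrative, not part of Parlib): from uthread code, the
 * outer wrappers are used like a regular lock; they handle disabling and
 * reenabling notifications around the critical section.  The 'example_*'
 * names are hypothetical, and spin_pdr_init() is assumed to have been called
 * once at startup. */
#if 0
static struct spin_pdr_lock example_pdr_lock;
static int example_shared;

static void example_critical_section(void)
{
	spin_pdr_lock(&example_pdr_lock);
	example_shared++;
	spin_pdr_unlock(&example_pdr_lock);
}
#endif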