MCS locks have a "notification-safe" variant
akaros.git: user/parlib/mcs.c
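For orientation, here is a rough sketch of the declarations the code expects from mcs.h, reconstructed from how the fields are used below. The exact field types, their ordering, the MCS_MAX_ROUNDS bound, and any volatile qualifiers on the spun-on fields are assumptions, not the real header.

/* Sketch only -- inferred from usage below, not the actual mcs.h. */
typedef struct mcs_lock_qnode {
        struct mcs_lock_qnode *next;      /* successor waiting on the lock */
        int locked;                       /* spun on until the predecessor clears it */
} mcs_lock_qnode_t;

typedef struct mcs_lock {
        mcs_lock_qnode_t *lock;           /* tail of the waiter queue; 0 when free */
} mcs_lock_t;

#define MCS_MAX_ROUNDS 32                 /* assumed bound on log2(max_vcores()) */
typedef struct mcs_dissem_flags {
        size_t parity;
        size_t sense;
        size_t myflags[2][MCS_MAX_ROUNDS];
        size_t *partnerflags[2][MCS_MAX_ROUNDS];
} mcs_dissem_flags_t;

typedef struct mcs_barrier {
        size_t nprocs;
        size_t logp;
        mcs_dissem_flags_t *allnodes;
} mcs_barrier_t;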
#include <vcore.h>
#include <mcs.h>
#include <arch/atomic.h>
#include <string.h>
#include <stdlib.h>

// MCS locks
void mcs_lock_init(struct mcs_lock *lock)
{
        memset(lock,0,sizeof(mcs_lock_t));
}

static inline mcs_lock_qnode_t *mcs_qnode_swap(mcs_lock_qnode_t **addr,
                                               mcs_lock_qnode_t *val)
{
        /* Casting through int assumes pointers fit in an int (32-bit target). */
        return (mcs_lock_qnode_t*)atomic_swap((int*)addr,(int)val);
}

void mcs_lock_lock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        qnode->next = 0;
        /* Swap ourselves in as the new tail of the queue. */
        mcs_lock_qnode_t* predecessor = mcs_qnode_swap(&lock->lock,qnode);
        if(predecessor)
        {
                /* Lock is held: link in behind our predecessor and spin until
                 * it hands the lock off by clearing 'locked'. */
                qnode->locked = 1;
                predecessor->next = qnode;
                while(qnode->locked)
                        cpu_relax();
        }
}

void mcs_lock_unlock(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        if(qnode->next == 0)
        {
                /* No visible successor yet: swing the tail back to 0.  If the
                 * tail was still us, no one is waiting and we are done. */
                mcs_lock_qnode_t* old_tail = mcs_qnode_swap(&lock->lock,0);
                if(old_tail == qnode)
                        return;

                /* Waiters exist but our successor hasn't linked in yet.  Our
                 * first swap detached them from the lock, so put them back on
                 * the tail, then wait for the successor's link to appear. */
                mcs_lock_qnode_t* usurper = mcs_qnode_swap(&lock->lock,old_tail);
                while(qnode->next == 0)
                        cpu_relax();
                /* If a usurper slipped in while the tail was briefly 0, it now
                 * holds the lock; splice our detached waiters in behind it.
                 * Otherwise hand the lock directly to our successor. */
                if(usurper)
                        usurper->next = qnode->next;
                else
                        qnode->next->locked = 0;
        }
        else
                qnode->next->locked = 0;
}
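A minimal usage sketch of the plain lock; counter_lock, counter, and bump_counter are hypothetical names. Each caller supplies its own qnode, typically on its stack, and must pass that same qnode to the matching unlock.

/* Usage sketch; counter_lock, counter, and bump_counter are hypothetical. */
static mcs_lock_t counter_lock;          /* mcs_lock_init(&counter_lock) once at startup */
static int counter;

void bump_counter(void)
{
        struct mcs_lock_qnode qnode;
        mcs_lock_lock(&counter_lock, &qnode);
        counter++;                       /* critical section */
        mcs_lock_unlock(&counter_lock, &qnode);
}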

/* We don't bother saving the state, like we do with irqsave, since we can use
 * whether or not we are in vcore context to determine that.  This means you
 * shouldn't call this from those moments when you fake being in vcore context
 * (when switching into the TLS, etc). */
void mcs_lock_notifsafe(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        if (!in_vcore_context())
                disable_notifs(vcore_id());
        mcs_lock_lock(lock, qnode);
}

void mcs_unlock_notifsafe(struct mcs_lock *lock, struct mcs_lock_qnode *qnode)
{
        mcs_lock_unlock(lock, qnode);
        if (!in_vcore_context())
                enable_notifs(vcore_id());
}
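A sketch of the notif-safe variant in use; shared_lock and both callers are hypothetical names. Uthread-context callers get notifications disabled around the hold, presumably so the holder cannot be interrupted mid-critical-section while other vcores spin on the lock; vcore-context callers can use the same wrappers, which detect the context and leave notifications alone.

/* Usage sketch; shared_lock and both functions are hypothetical names. */
static mcs_lock_t shared_lock;

/* Called from ordinary uthread code: notifs are disabled around the hold. */
void uthread_touch_shared(void)
{
        struct mcs_lock_qnode qnode;
        mcs_lock_notifsafe(&shared_lock, &qnode);
        /* ... state also touched from vcore context ... */
        mcs_unlock_notifsafe(&shared_lock, &qnode);
}

/* Called from vcore context (e.g. an event handler): the wrappers see
 * in_vcore_context() and skip the disable/enable. */
void vcore_touch_shared(void)
{
        struct mcs_lock_qnode qnode;
        mcs_lock_notifsafe(&shared_lock, &qnode);
        /* ... */
        mcs_unlock_notifsafe(&shared_lock, &qnode);
}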

// MCS dissemination barrier!
int mcs_barrier_init(mcs_barrier_t* b, size_t np)
{
        if(np > max_vcores())
                return -1;
        b->allnodes = (mcs_dissem_flags_t*)malloc(np*sizeof(mcs_dissem_flags_t));
        if(!b->allnodes)
                return -1;
        memset(b->allnodes,0,np*sizeof(mcs_dissem_flags_t));
        b->nprocs = np;

        /* logp = ceil(log2(np)): the number of rounds per barrier episode. */
        b->logp = (np & (np-1)) != 0;
        while(np >>= 1)
                b->logp++;

        size_t i,k;
        for(i = 0; i < b->nprocs; i++)
        {
                b->allnodes[i].parity = 0;
                b->allnodes[i].sense = 1;

                /* In round k, participant i signals participant (i + 2^k) mod nprocs. */
                for(k = 0; k < b->logp; k++)
                {
                        size_t j = (i+(1<<k)) % b->nprocs;
                        b->allnodes[i].partnerflags[0][k] = &b->allnodes[j].myflags[0][k];
                        b->allnodes[i].partnerflags[1][k] = &b->allnodes[j].myflags[1][k];
                }
        }

        return 0;
}

void mcs_barrier_wait(mcs_barrier_t* b, size_t pid)
{
        mcs_dissem_flags_t* localflags = &b->allnodes[pid];
        size_t i;
        for(i = 0; i < b->logp; i++)
        {
                /* Signal this round's partner, then wait to be signaled. */
                *localflags->partnerflags[localflags->parity][i] = localflags->sense;
                while(localflags->myflags[localflags->parity][i] != localflags->sense)
                        cpu_relax();
        }
        /* Alternate parity each episode and flip sense every other episode, so
         * the flag slots can be reused without clearing them. */
        if(localflags->parity)
                localflags->sense = 1-localflags->sense;
        localflags->parity = 1-localflags->parity;
}
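Finally, a sketch of how the dissemination barrier might be used; barrier_setup, worker_loop, and nworkers are hypothetical names. One participant per vcore, each passing its own vcore id as pid, which must be unique and less than the count given to mcs_barrier_init.

/* Usage sketch; barrier_setup, worker_loop, and nworkers are hypothetical. */
static mcs_barrier_t barrier;

void barrier_setup(size_t nworkers)
{
        if(mcs_barrier_init(&barrier, nworkers))
                abort();                 /* nworkers > max_vcores() or allocation failed */
}

/* Run by each of the nworkers vcores. */
void worker_loop(void)
{
        /* ... phase 1 work ... */
        mcs_barrier_wait(&barrier, vcore_id());
        /* everyone has finished phase 1; safe to start phase 2 ... */
}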