// user/parlib/src/hart.c (akaros.git)
#include <hart.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>   // for ENOMEM (may already be pulled in via parlib.h)
#include <parlib.h>

// TODO: HART_ALLOCATE_STACKS should be disposed of by means of a better ABI.

// Number of harts currently granted to this process; starts at 1 (hart 0).
static size_t _hart_current_harts = 1;
// Protects _hart_current_harts and the per-hart stack bookkeeping.
static hart_lock_t _hart_lock = HART_LOCK_INIT;

// Write an error message to stderr and terminate the process.
static void hart_abort(const char* str)
{
        write(2,str,strlen(str));
        abort();
}

// Default entry point for newly-granted harts.  Declared weak so that an
// application can supply its own hart_entry() and override this stub.
#pragma weak hart_entry
void hart_entry()
{
        hart_abort("You should write your own damn hart_entry()!\n");
}
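
/*
 * Since hart_entry is weak, an application overrides it simply by defining a
 * strong hart_entry() of its own; the linker prefers the application's
 * version over this stub.  A minimal sketch (hypothetical application code,
 * not part of this file; do_per_hart_work() is assumed):
 *
 *   void hart_entry()
 *   {
 *       // every newly-granted hart begins execution here
 *       do_per_hart_work(hart_self());
 *       hart_yield();
 *   }
 */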

// One-time initialization of per-hart state (currently just the optional
// array of stack pointers handed to newly-granted harts).
static void _hart_init()
{
        static int initialized = 0;
        if(initialized)
                return;

        initialized = 1;

        #ifdef HART_ALLOCATE_STACKS
        extern void** stack_ptr_array;
        stack_ptr_array = (void**)calloc(hart_max_harts(),sizeof(void*));
        if(stack_ptr_array == NULL)
                hart_abort("Harts initialization ran out of memory!\n");
        #endif
}

// Request k additional harts from the kernel.  Returns 0 on success, -1 if
// the request would exceed hart_max_harts(), -ENOMEM if stacks could not be
// allocated, or the error from sys_resource_req() otherwise.
error_t hart_request(size_t k)
{
        size_t i,j;
        const int user_stack_size = 1024*1024;

        #ifdef HART_ALLOCATE_STACKS
        extern void** stack_ptr_array;
        #endif

        _hart_init();

        hart_lock_lock(&_hart_lock);

        // k is unsigned, so only the upper bound needs checking.
        if(_hart_current_harts+k > hart_max_harts())
        {
                hart_lock_unlock(&_hart_lock);
                return -1;
        }

        #ifdef HART_ALLOCATE_STACKS
        for(i = _hart_current_harts; i < _hart_current_harts+k; i++)
        {
                char* stack = (char*)malloc(user_stack_size);
                if(stack == NULL)
                {
                        // Out of memory: free the stacks allocated so far.  The
                        // array holds stack tops, so step back to the malloc'd base.
                        for(j = _hart_current_harts; j < i; j++)
                        {
                                free((char*)stack_ptr_array[j] - user_stack_size);
                                stack_ptr_array[j] = 0;
                        }
                        hart_lock_unlock(&_hart_lock);
                        return -ENOMEM;
                }
                // Record the top of the allocation (stacks grow down).
                stack_ptr_array[i] = stack + user_stack_size;
        }
        #endif

        error_t ret;
        if((ret = sys_resource_req(0,_hart_current_harts+k,0)) == 0)
        {
                _hart_current_harts += k;
                hart_lock_unlock(&_hart_lock);
                return 0;
        }

        // The kernel refused the request; release the stacks just allocated.
        #ifdef HART_ALLOCATE_STACKS
        for(i = _hart_current_harts; i < _hart_current_harts+k; i++)
        {
                free((char*)stack_ptr_array[i] - user_stack_size);
                stack_ptr_array[i] = 0;
        }
        #endif

        hart_lock_unlock(&_hart_lock);
        return ret;
}
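
/*
 * Usage sketch (hypothetical caller, not part of this file): ask for two
 * extra harts and distinguish the failure modes.
 *
 *   error_t err = hart_request(2);
 *   if(err == 0)
 *       ;  // granted: hart_entry() will run on the new harts
 *   else if(err == -ENOMEM)
 *       ;  // could not allocate stacks for the new harts
 *   else
 *       ;  // over hart_max_harts(), or the kernel denied the request
 */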

// Yield this hart back to the kernel.
void hart_yield()
{
        syscall(SYS_yield,0,0,0,0,0);
}

size_t hart_max_harts()
{
        return procinfo.max_harts < HART_MAX_MAX_HARTS ? procinfo.max_harts : HART_MAX_MAX_HARTS;
}

// MCS locks!!
void hart_lock_init(hart_lock_t* lock)
{
        memset(lock,0,sizeof(hart_lock_t));
}

// Atomically swap a qnode pointer, returning the previous value.
static inline hart_lock_qnode_t* hart_qnode_swap(hart_lock_qnode_t** addr, hart_lock_qnode_t* val)
{
        return (hart_lock_qnode_t*)hart_swap((size_t*)addr,(size_t)val);
}

void hart_lock_lock(hart_lock_t* lock)
{
        hart_lock_qnode_t* qnode = &lock->qnode[hart_self()];
        qnode->next = 0;
        // Append our qnode to the tail of the queue.
        hart_lock_qnode_t* predecessor = hart_qnode_swap(&lock->lock,qnode);
        if(predecessor)
        {
                // The lock is held; link behind the predecessor and spin on our own flag.
                qnode->locked = 1;
                predecessor->next = qnode;
                while(qnode->locked);
        }
}

void hart_lock_unlock(hart_lock_t* lock)
{
        hart_lock_qnode_t* qnode = &lock->qnode[hart_self()];
        if(qnode->next == 0)
        {
                // No known successor.  Swap the tail out; if it is still us,
                // the queue is empty and we are done.
                hart_lock_qnode_t* old_tail = hart_qnode_swap(&lock->lock,0);
                if(old_tail == qnode)
                        return;

                // Waiters exist but have not linked themselves in yet.
                // Restore the tail, then wait for our successor to appear.
                hart_lock_qnode_t* usurper = hart_qnode_swap(&lock->lock,old_tail);
                while(qnode->next == 0);
                if(usurper)
                        // A newcomer grabbed the momentarily-free lock; splice
                        // our waiters in behind it.
                        usurper->next = qnode->next;
                else
                        // No newcomer; hand the lock to our successor.
                        qnode->next->locked = 0;
        }
        else
                qnode->next->locked = 0;
}
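
/*
 * Usage sketch (hypothetical, not part of this file): a statically
 * initialized lock protecting a shared counter.  Each hart's qnode is
 * indexed by hart_self(), so a hart must not re-acquire a lock it already
 * holds.
 *
 *   static hart_lock_t counter_lock = HART_LOCK_INIT;
 *   static size_t counter = 0;
 *
 *   void bump_counter()  // hypothetical helper
 *   {
 *       hart_lock_lock(&counter_lock);
 *       counter++;
 *       hart_lock_unlock(&counter_lock);
 *   }
 */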

// MCS dissemination barrier!
error_t hart_barrier_init(hart_barrier_t* b, size_t np)
{
        if(np > hart_max_harts())
                return -1;
        b->allnodes = (hart_dissem_flags_t*)calloc(np,sizeof(hart_dissem_flags_t));
        if(b->allnodes == NULL)
                return -ENOMEM;
        b->nprocs = np;

        // logp = ceil(log2(np)), the number of rounds per barrier episode.
        b->logp = (np & (np-1)) != 0;
        while(np >>= 1)
                b->logp++;

        size_t i,k;
        for(i = 0; i < b->nprocs; i++)
        {
                b->allnodes[i].parity = 0;
                b->allnodes[i].sense = 1;

                // In round k, hart i signals hart (i + 2^k) mod nprocs.
                for(k = 0; k < b->logp; k++)
                {
                        size_t j = (i+(1<<k)) % b->nprocs;
                        b->allnodes[i].partnerflags[0][k] = &b->allnodes[j].myflags[0][k];
                        b->allnodes[i].partnerflags[1][k] = &b->allnodes[j].myflags[1][k];
                }
        }

        return 0;
}

void hart_barrier_wait(hart_barrier_t* b, size_t pid)
{
        hart_dissem_flags_t* localflags = &b->allnodes[pid];
        size_t i;
        for(i = 0; i < b->logp; i++)
        {
                // Signal this round's partner, then wait to be signaled ourselves.
                *localflags->partnerflags[localflags->parity][i] = localflags->sense;
                while(localflags->myflags[localflags->parity][i] != localflags->sense);
        }
        // Alternate parity each episode; flip sense every other episode so the
        // flags can be reused without clearing them.
        if(localflags->parity)
                localflags->sense = 1-localflags->sense;
        localflags->parity = 1-localflags->parity;
}
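
/*
 * Usage sketch (hypothetical, not part of this file): np harts run a phased
 * computation and synchronize after every phase.  hart_barrier_init() must
 * be called once, by one hart, before any hart waits; do_phase() is assumed.
 *
 *   static hart_barrier_t barrier;   // init with hart_barrier_init(&barrier, np)
 *
 *   void run_phases(size_t nphases)
 *   {
 *       for(size_t p = 0; p < nphases; p++)
 *       {
 *           do_phase(p, hart_self());
 *           hart_barrier_wait(&barrier, hart_self());
 *       }
 *   }
 */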

size_t hart_self()
{
        // defined in ros/arch/hart.h
        return __hart_self();
}