/*
 * Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 */


#ifdef __SHARC__
#pragma nosharc
#endif

#include <ros/common.h>
#include <smp.h>
#include <arch/init.h>
#include <mm.h>
#include <elf.h>
#include <frontend.h>

#include <kmalloc.h>
#include <assert.h>
#include <manager.h>
#include <process.h>
#include <schedule.h>
#include <syscall.h>
#include <testing.h>
#include <kfs.h>
#include <stdio.h>
#include <timing.h>
#include <resource.h>
#include <monitor.h>
#include <colored_caches.h>
#include <string.h>
#include <pmap.h>
#include <ros/timer.h>
#include <ros/arch/membar.h>

/*
 * Currently, if you leave this function by way of proc_run (e.g. the process
 * workqueue calling proc_run), you will never come back to where you left
 * off; the next call starts from the top again.  Hence the 'progress' hack.
 */
void manager(void)
{
        #ifndef DEVELOPER_NAME
                #define DEVELOPER_NAME brho
        #endif

        // LoL
        #define PASTE(s1,s2) s1 ## s2
        #define MANAGER_FUNC(dev) PASTE(manager_,dev)

        void MANAGER_FUNC(DEVELOPER_NAME)(void);
        MANAGER_FUNC(DEVELOPER_NAME)();
}
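
/* For reference: the token-pasting above means a build with
 * -DDEVELOPER_NAME=klueska makes manager() call manager_klueska(); the extra
 * PASTE() level is what lets DEVELOPER_NAME expand before ## glues the
 * tokens together.  A minimal, disabled sketch of the same trick with a
 * hypothetical widget_* family of functions (not part of this file): */
#if 0
#define WIDGET_PASTE(s1, s2) s1 ## s2
#define WIDGET_FUNC(name) WIDGET_PASTE(widget_, name)

void widget_foo(void) { }

void run_widget(void)
{
        WIDGET_FUNC(foo)();     /* expands to widget_foo() */
}
#endif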

/* Helper macro for quickly running something out of KFS.  Pass it a string
 * and a proc pointer. */
#define quick_proc_run(x, p)                                                   \
        (p) = kfs_proc_create(kfs_lookup_path((x)));                           \
        spin_lock(&(p)->proc_lock);                                            \
        __proc_set_state((p), PROC_RUNNABLE_S);                                \
        spin_unlock(&(p)->proc_lock);                                          \
        proc_run((p));                                                         \
        proc_decref((p), 1);

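/* A note on the macro above: it expands to several statements, so it is only
 * safe when used as a standalone statement, as in the switch cases below.  A
 * disabled illustration of the hazard, with a hypothetical condition: */
#if 0
        if (some_condition)                     /* hypothetical */
                quick_proc_run("msr_dumb_while", p);
        /* Only the first expanded statement is guarded by the if; wrapping
         * the macro body in do { ... } while (0) would be the usual fix. */
#endif
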
void manager_brho(void)
{
        static uint8_t RACY progress = 0;
        static struct proc *p;

        // for testing taking cores, check in case 1 for usage
        uint32_t corelist[MAX_NUM_CPUS];
        uint32_t num = 3;

        switch (progress++) {
                case 0:
                        quick_proc_run("msr_dumb_while", p);
                        #if 0
                        // this is how you can transition to a parallel process manually
                        // make sure you don't proc_run first
                        __proc_set_state(p, PROC_RUNNING_S);
                        __proc_set_state(p, PROC_RUNNABLE_M);
                        p->resources[RES_CORES].amt_wanted = 5;
                        spin_unlock(&p->proc_lock);
                        core_request(p);
                        panic("This is okay");
                        #endif
                        break;
                case 1:
                        #if 0
                        udelay(10000000);
                        // this is a ghetto way to test restarting an _M
                                printk("\nattempting to ghetto preempt...\n");
                                spin_lock(&p->proc_lock);
                                proc_take_allcores(p, __death);
                                __proc_set_state(p, PROC_RUNNABLE_M);
                                spin_unlock(&p->proc_lock);
                                udelay(5000000);
                                printk("\nattempting to restart...\n");
                                core_request(p); // proc still wants the cores
                        panic("This is okay");
                        // this tests taking some cores, and later killing an _M
                                printk("taking 3 cores from p\n");
                                for (int i = 0; i < num; i++)
                                        corelist[i] = 7-i; // 7, 6, and 5
                                spin_lock(&p->proc_lock);
                                proc_take_cores(p, corelist, &num, __death);
                                spin_unlock(&p->proc_lock);
                                udelay(5000000);
                                printk("Killing p\n");
                                proc_destroy(p);
                                printk("Killed p\n");
                        panic("This is okay");

                        envs[0] = kfs_proc_create(kfs_lookup_path("roslib_hello"));
                        __proc_set_state(envs[0], PROC_RUNNABLE_S);
                        proc_run(envs[0]);
                        break;
                        #endif
                        /* falls through to the default case while the block
                         * above is disabled */
                case 2:
                        /*
                        test_smp_call_functions();
                        test_checklists();
                        test_barrier();
                        test_print_info();
                        test_lapic_status_bit();
                        test_ipi_sending();
                        test_pit();
                        */
                default:
                        printd("Manager Progress: %d\n", progress);
                        // delay if you want to test rescheduling an MCP that yielded
                        //udelay(15000000);
                        schedule();
        }
        panic("If you see me, then you probably screwed up");
        monitor(0);

        /*
        printk("Servicing syscalls from Core 0:\n\n");
        while (1) {
                process_generic_syscalls(&envs[0], 1);
                cpu_relax();
        }
        */
        return;
}
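
/* The 'progress' trick above: every pass through the manager reenters this
 * function from the top, and the static counter selects the next case, so
 * successive entries behave like sequential steps.  A stripped-down sketch
 * of the pattern, with hypothetical step functions, kept disabled: */
#if 0
static void step_pattern(void)
{
        static uint8_t step = 0;

        switch (step++) {
                case 0:
                        do_first_thing();       /* hypothetical: first entry */
                        break;
                case 1:
                        do_second_thing();      /* hypothetical: second entry */
                        break;
                default:
                        schedule();             /* every later entry */
        }
}
#endif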

void manager_klueska(void)
{
        static struct proc *envs[256];
        static volatile uint8_t progress = 0;

        if (progress == 0) {
                progress++;
                envs[0] = kfs_proc_create(kfs_lookup_path("fillmeup"));
                __proc_set_state(envs[0], PROC_RUNNABLE_S);
                proc_run(envs[0]);
        }
        schedule();

        panic("DON'T PANIC");
}

struct elf_info
{
        long entry;
        long phdr;
        int phnum;
        int dynamic;
        char interp[256];
};

void manager_waterman(void)
{
        static int init = 0;
        if (!init)
        {
                init = 1;
                struct proc* p = proc_create(NULL, 0);

                /* Run busybox's shell as a login shell: argv names it
                 * "/bin/sh -l", while the ELF itself comes from /bin/busybox.
                 * The args/env are packed into procinfo before the load. */
                char* argv[] = {"/bin/sh", "-l", 0};
                char* envp[] = {"LD_LIBRARY_PATH=/lib", 0};
                procinfo_pack_args(p->procinfo, argv, envp);

                struct file* f = file_open("/bin/busybox", 0, 0);
                assert(f != NULL);
                assert(load_elf(p, f) == 0);
                file_decref(f);

                __proc_set_state(p, PROC_RUNNABLE_S);
                proc_run(p);
        }
        schedule();
}

void manager_pearce(void)
{
        static struct proc *envs[256];
        static volatile uint8_t progress = 0;

        if (progress == 0) {
                progress++;
                envs[0] = kfs_proc_create(kfs_lookup_path("parlib_httpserver_integrated"));
                //envs[0] = kfs_proc_create(kfs_lookup_path("parlib_lock_test"));
                __proc_set_state(envs[0], PROC_RUNNABLE_S);
                proc_run(envs[0]);
        }
        schedule();

        panic("DON'T PANIC");
}

#ifdef __CONFIG_OSDI__
/* Manager for microbenchmarks, OSDI, etc. */
struct proc *mgr_p1 = 0;
struct proc *mgr_p2 = 0;
static void exper_1_part2(struct proc **pp);
static void exper_2_part2(struct proc **pp);
static void exper_3_part2(struct proc **pp);
static void exper_4_part2(struct proc **pp);
static void exper_5_part2(struct proc **pp);
static void exper_6_part2(struct proc **pp);
static void exper_7_part2(struct proc **pp);
static void exper_8_part2(struct proc **pp);
static void exper_9_part2(struct proc **pp);

void manager_tests(void)
{
        static uint8_t RACY progress = 0;

        printk("Test Progress: %d\n", progress);
        /* Ten runs of every experiment.  Running a part2 on a null pointer is
         * harmless.  We need to clean up (run the part2) after each
         * quick_proc_run, but we leave to the monitor and only come back on
         * another run (with progress++), so each experiment's part2 is run in
         * the first case of the *next* experiment. */
        switch (progress++) {
                /* Experiment 1: get max vcores */
                case 0:
                        printk("************* Starting experiment 1 ************** \n");
                case 1:
                case 2:
                case 3:
                case 4:
                case 5:
                case 6:
                case 7:
                case 8:
                case 9:
                        exper_1_part2(&mgr_p1);
                        quick_proc_run("msr_get_cores", mgr_p1);
                        break;
                /* Experiment 2: get a single vcore */
                case 10:
                        exper_1_part2(&mgr_p1);
                        printk("************* Starting experiment 2 ************** \n");
                case 11:
                case 12:
                case 13:
                case 14:
                case 15:
                case 16:
                case 17:
                case 18:
                case 19:
                        exper_2_part2(&mgr_p1);
                        quick_proc_run("msr_get_singlecore", mgr_p1);
                        break;
                /* Experiment 3: kill a _M */
                case 20: /* leftover from exp 2 */
                        exper_2_part2(&mgr_p1);
                        printk("************* Starting experiment 3 ************** \n");
                case 21:
                case 22:
                case 23:
                case 24:
                case 25:
                case 26:
                case 27:
                case 28:
                case 29:
                        exper_3_part2(&mgr_p1);
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                /* Experiment 4: _S create and death */
                case 30: /* leftover from exp 3 */
                        exper_3_part2(&mgr_p1);
                        printk("************* Starting experiment 4 ************** \n");
                case 31:
                case 32:
                case 33:
                case 34:
                case 35:
                case 36:
                case 37:
                case 38:
                case 39:
                        exper_4_part2(&mgr_p1);
                        printk("[T]:004:S:%llu\n", read_tsc());
                        quick_proc_run("tsc_spitter", mgr_p1);
                        break;
                /* Experiment 5: raw preempt, entire process */
                case 40:
                        exper_4_part2(&mgr_p1);
                        printk("************* Starting experiment 5 ************** \n");
                case 41:
                case 42:
                case 43:
                case 44:
                case 45:
                case 46:
                case 47:
                case 48:
                case 49:
                        exper_5_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 6: preempt-warn, entire process */
                case 50:
                        exper_5_part2(&mgr_p1);
                        printk("************* Starting experiment 6 ************** \n");
                case 51:
                case 52:
                case 53:
                case 54:
                case 55:
                case 56:
                case 57:
                case 58:
                case 59:
                        exper_6_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 7: preempt-raw, single core */
                case 60:
                        exper_6_part2(&mgr_p1);
                        printk("************* Starting experiment 7 ************** \n");
                case 61:
                case 62:
                case 63:
                case 64:
                case 65:
                case 66:
                case 67:
                case 68:
                case 69:
                        exper_7_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 8: preempt-warn, single core */
                case 70:
                        exper_7_part2(&mgr_p1);
                        printk("************* Starting experiment 8 ************** \n");
                case 71:
                case 72:
                case 73:
                case 74:
                case 75:
                case 76:
                case 77:
                case 78:
                case 79:
                        exper_8_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 9: single notification time */
                case 80:
                        exper_8_part2(&mgr_p1);
                        printk("************* Starting experiment 9 ************** \n");
                case 81:
                case 82:
                case 83:
                case 84:
                case 85:
                case 86:
                case 87:
                case 88:
                case 89:
                        exper_9_part2(&mgr_p1);
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                /* Experiment 10: cycling vcore */
                case 90:
                        exper_9_part2(&mgr_p1);
                        printk("************* Starting experiment 10 ************* \n");
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                case 91:
                        quick_proc_run("msr_cycling_vcores", mgr_p2);
                        break;
                case 92:
                        printk("Will go on forever.  Udelaying for two minutes.\n");
                        udelay(120000000);
                        proc_incref(mgr_p1, 1);
                        proc_destroy(mgr_p1);
                        proc_decref(mgr_p1, 1);
                        proc_incref(mgr_p2, 1);
                        proc_destroy(mgr_p2);
                        proc_decref(mgr_p2, 1);
                        printk("Done with the tests!\n");
                        monitor(0);
                        break;
                default:
                        printd("Manager Progress: %d\n", progress);
                        schedule();
        }
        monitor(0);
        return;
}

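/* The structure above: each experiment step launches a test process with
 * quick_proc_run() and then drops to the monitor; the matching
 * exper_N_part2() "bottom half" therefore runs at the start of the *next*
 * step, where it waits for (or tears down) the previous run before kicking
 * off the new one.  A disabled schematic of one such step, with a
 * hypothetical part2 and test binary: */
#if 0
static void exper_step(struct proc **pp)
{
        exper_n_part2(pp);                /* hypothetical: finish the previous run */
        quick_proc_run("some_test", *pp); /* start this run, then fall to the monitor */
}
#endif
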
/* OSDI experiment "bottom halves" */
/* Experiment 1: get max vcores */
static void exper_1_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 2: get a single vcore */
static void exper_2_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 3: kill a _M */
static void exper_3_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;

        if (*pp) { /* need to kill, etc */
                proc_incref(*pp, 1);
                begin = start_timing();
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                wmb();
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to kill.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:003:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
        }
}

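/* The timing printks in these bottom halves convert a TSC-tick delta to wall
 * time with system_timing.tsc_freq (ticks per second):
 * usec = ticks * 10^6 / freq and nsec = ticks * 10^9 / freq.  A quick worked
 * example, disabled, assuming a hypothetical 2 GHz TSC: */
#if 0
        uint64_t ticks = 5000;                          /* from start/stop_timing() */
        uint64_t freq  = 2000000000ULL;                 /* assumed tsc_freq */
        uint64_t usec  = ticks * 1000000 / freq;        /* 2 usec (integer division) */
        uint64_t nsec  = ticks * 1000000000ULL / freq;  /* 2500 nsec */
#endif
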
/* Experiment 4: _S create and death */
static void exper_4_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 5: raw preempt, entire process */
static void exper_5_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        uint32_t end_refcnt = 0;
        bool self_ipi_pending = FALSE;

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                end_refcnt = (*pp)->env_refcnt - (*pp)->procinfo->num_vcores;
                begin = start_timing();
                self_ipi_pending = __proc_preempt_all(*pp);
                spin_unlock(&(*pp)->proc_lock);
                __proc_kmsg_pending(*pp, self_ipi_pending);
                spin_on((*pp)->env_refcnt != end_refcnt);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to raw preempt all.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:005:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

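/* Experiments 5 through 8 time a preemption by busy-waiting with spin_on()
 * until its effect is visible (the refcount drops back, or the pcoremap
 * entry clears) before calling stop_timing().  spin_on() is assumed to be
 * roughly the loop below (hypothetical stand-in, not this kernel's actual
 * definition): */
#if 0
#define my_spin_on(x)                                                          \
        while ((x))                                                            \
                cpu_relax();
#endif
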
/* Experiment 6: preempt-warn, entire process */
static void exper_6_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                begin = start_timing();
                __proc_preempt_warnall(*pp, 1000000);
                spin_unlock(&(*pp)->proc_lock);
                spin_on((*pp)->procinfo->num_vcores > 1);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to warn preempt all.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:006:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 7: preempt-raw, single core */
static void exper_7_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        bool self_ipi_pending = FALSE;
        uint32_t pcoreid = 7; // some core available on all systems

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                assert((*pp)->procinfo->pcoremap[pcoreid].valid);
                begin = start_timing();
                self_ipi_pending = __proc_preempt_core(*pp, pcoreid);
                spin_unlock(&(*pp)->proc_lock);
                __proc_kmsg_pending(*pp, self_ipi_pending);
                spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to raw-preempt one core.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:007:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 8: preempt-warn, single core */
static void exper_8_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        uint32_t vcoreid, pcoreid = 7; // some core available on all systems

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                vcoreid = (*pp)->procinfo->pcoremap[pcoreid].vcoreid;
                assert((*pp)->procinfo->pcoremap[pcoreid].valid);
                begin = start_timing();
                __proc_preempt_warn(*pp, vcoreid, 1000000); // 1 sec
                spin_unlock(&(*pp)->proc_lock);
                spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to warn-preempt one core.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:008:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 9: single notification time */
static void exper_9_part2(struct proc **pp)
{
        struct notif_event ne = {0};

        if (*pp) {
                ne.ne_type = NE_ALARM;
                proc_incref(*pp, 1);
                printk("[T]:009:B:%llu\n", read_tsc());
                proc_notify(*pp, NE_ALARM, &ne);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

#endif /* __CONFIG_OSDI__ */

#ifdef __sparc_v8__

/* Convert num to a nul-terminated string in the given base (up to 16),
 * writing into buf0 and returning it.  Negative numbers get a leading '-'. */
static char*
itoa(int num, char* buf0, size_t base)
{
        if(base > 16)
                return NULL;

        char* buf = buf0;
        int len = 0, i;

        if(num < 0)
        {
                *buf++ = '-';
                num = -num;
        }

        do {
                buf[len++] = "0123456789abcdef"[num%base];
                num /= base;
        } while(num);

        for(i = 0; i < len/2; i++)
        {
                char temp = buf[i];
                buf[i] = buf[len-i-1];
                buf[len-i-1] = temp;
        }
        buf[len] = 0;

        return buf0;
}

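/* Quick, disabled usage sketch for itoa() above: the returned pointer is the
 * caller's buffer, so it can be fed straight to printk(). */
#if 0
        char buf[32];
        printk("%s\n", itoa(-42, buf, 16));     /* prints "-2a" */
        printk("%s\n", itoa(-42, buf, 10));     /* prints "-42" */
#endif
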
void gsf_set_frame_cycles(int cycles)
{
        store_alternate(26*4, 2, cycles);
}

void gsf_set_partition_credits(int partition, int credits)
{
        store_alternate((32+partition)*4, 2, credits);
}

void gsf_set_core_partition(int core, int partition)
{
        store_alternate((64+core)*4, 2, partition);
}

#endif