/*
 * Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 */


#ifdef __SHARC__
#pragma nosharc
#endif

#include <ros/common.h>
#include <smp.h>
#include <arch/init.h>
#include <mm.h>
#include <elf.h>
#include <frontend.h>

#include <kmalloc.h>
#include <assert.h>
#include <manager.h>
#include <process.h>
#include <schedule.h>
#include <syscall.h>
#include <testing.h>
#include <kfs.h>
#include <stdio.h>
#include <timing.h>
#include <resource.h>
#include <monitor.h>
#include <colored_caches.h>
#include <string.h>
#include <pmap.h>
#include <ros/timer.h>
#include <ros/arch/membar.h>

/*
 * Currently, if you leave this function by way of proc_run (e.g. via a
 * process_workqueue that calls proc_run), you will never come back to where
 * you left off; the function will start again from the top.  Hence the
 * 'progress' hack.
 */
void manager(void)
{
        #ifndef DEVELOPER_NAME
                #define DEVELOPER_NAME brho
        #endif

        // LoL
        #define PASTE(s1,s2) s1 ## s2
        #define MANAGER_FUNC(dev) PASTE(manager_,dev)

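        /* With the default DEVELOPER_NAME (brho), the token-pasting macros
         * above expand the next two lines into:
         *     void manager_brho(void);
         *     manager_brho();
         * i.e. each developer gets their own manager_<name>() entry point. */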
        void MANAGER_FUNC(DEVELOPER_NAME)(void);
        MANAGER_FUNC(DEVELOPER_NAME)();
}

/* Helper macros for quickly running something out of KFS.  Pass them a path
 * string and a proc pointer (the *_color_* variants also take the number of
 * cache colors to give the process). */
#define quick_proc_run(x, p)                                                   \
        (p) = kfs_proc_create(kfs_lookup_path((x)));                           \
        spin_lock(&(p)->proc_lock);                                            \
        __proc_set_state((p), PROC_RUNNABLE_S);                                \
        spin_unlock(&(p)->proc_lock);                                          \
        proc_run((p));                                                         \
        proc_decref((p), 1);

#define quick_proc_create(x, p)                                                \
        (p) = kfs_proc_create(kfs_lookup_path((x)));                           \
        spin_lock(&(p)->proc_lock);                                            \
        __proc_set_state((p), PROC_RUNNABLE_S);                                \
        spin_unlock(&(p)->proc_lock);

#define quick_proc_color_run(x, p, c)                                          \
        (p) = kfs_proc_create(kfs_lookup_path((x)));                           \
        spin_lock(&(p)->proc_lock);                                            \
        __proc_set_state((p), PROC_RUNNABLE_S);                                \
        spin_unlock(&(p)->proc_lock);                                          \
        p->cache_colors_map = cache_colors_map_alloc();                        \
        for (int i = 0; i < (c); i++)                                          \
                cache_color_alloc(llc_cache, p->cache_colors_map);             \
        proc_run((p));                                                         \
        proc_decref((p), 1);

#define quick_proc_color_create(x, p, c)                                       \
        (p) = kfs_proc_create(kfs_lookup_path((x)));                           \
        spin_lock(&(p)->proc_lock);                                            \
        __proc_set_state((p), PROC_RUNNABLE_S);                                \
        spin_unlock(&(p)->proc_lock);                                          \
        p->cache_colors_map = cache_colors_map_alloc();                        \
        for (int i = 0; i < (c); i++)                                          \
                cache_color_alloc(llc_cache, p->cache_colors_map);

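/* A minimal usage sketch (mirroring what manager_brho() does below); the
 * binary names are just examples from KFS:
 *
 *   struct proc *p;
 *   quick_proc_run("msr_dumb_while", p);           // create, run, drop our ref
 *   quick_proc_color_run("msr_dumb_while", p, 8);  // same, but grab 8 LLC colors first
 *
 * Note these macros expand to multiple statements, so don't use them as the
 * unbraced body of an if/for. */
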
void manager_brho(void)
{
        static uint8_t RACY progress = 0;
        static struct proc *p;

        // for testing taking cores; see case 1 for usage
        uint32_t corelist[MAX_NUM_CPUS];
        uint32_t num = 3;

        switch (progress++) {
                case 0:
                        /* 124 is half of the 248 colors available to userspace
                         * on boxboro (the kernel keeps 8 for itself) */
                        quick_proc_color_run("msr_dumb_while", p, 124);
                        //quick_proc_run("msr_dumb_while", p);
                        #if 0
                        // this is how you can transition to a parallel process manually
                        // make sure you don't proc run first
                        __proc_set_state(p, PROC_RUNNING_S);
                        __proc_set_state(p, PROC_RUNNABLE_M);
                        p->resources[RES_CORES].amt_wanted = 5;
                        spin_unlock(&p->proc_lock);
                        core_request(p);
                        panic("This is okay");
                        #endif
                        break;
                case 1:
                        #if 0
                        udelay(10000000);
                        // this is a ghetto way to test restarting an _M
                                printk("\nattempting to ghetto preempt...\n");
                                spin_lock(&p->proc_lock);
                                proc_take_allcores(p, __death);
                                __proc_set_state(p, PROC_RUNNABLE_M);
                                spin_unlock(&p->proc_lock);
                                udelay(5000000);
                                printk("\nattempting to restart...\n");
                                core_request(p); // proc still wants the cores
                        panic("This is okay");
                        // this tests taking some cores, and later killing an _M
                                printk("taking 3 cores from p\n");
                                for (int i = 0; i < num; i++)
                                        corelist[i] = 7-i; // 7, 6, and 5
                                spin_lock(&p->proc_lock);
                                proc_take_cores(p, corelist, &num, __death);
                                spin_unlock(&p->proc_lock);
                                udelay(5000000);
                                printk("Killing p\n");
                                proc_destroy(p);
                                printk("Killed p\n");
                        panic("This is okay");

                        envs[0] = kfs_proc_create(kfs_lookup_path("roslib_hello"));
                        __proc_set_state(envs[0], PROC_RUNNABLE_S);
                        proc_run(envs[0]);
                        break;
                        #endif
                case 2:
                        /*
                        test_smp_call_functions();
                        test_checklists();
                        test_barrier();
                        test_print_info();
                        test_lapic_status_bit();
                        test_ipi_sending();
                        test_pit();
                        */
                default:
                        printd("Manager Progress: %d\n", progress);
                        // delay if you want to test rescheduling an MCP that yielded
                        //udelay(15000000);
                        schedule();
        }
        panic("If you see me, then you probably screwed up");
        monitor(0);

        /*
        printk("Servicing syscalls from Core 0:\n\n");
        while (1) {
                process_generic_syscalls(&envs[0], 1);
                cpu_relax();
        }
        */
        return;
}

void manager_klueska()
{
        static struct proc *envs[256];
        static volatile uint8_t progress = 0;

        if (progress == 0) {
                progress++;
                envs[0] = kfs_proc_create(kfs_lookup_path("fillmeup"));
                __proc_set_state(envs[0], PROC_RUNNABLE_S);
                proc_run(envs[0]);
        }
        schedule();

        panic("DON'T PANIC");
}

struct elf_info
{
        long entry;
        long phdr;
        int phnum;
        int dynamic;
        char interp[256];
};

void manager_waterman()
{
        static int init = 0;
        if(!init)
        {
                init = 1;
                struct proc* p = proc_create(NULL,0);

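                /* Stage busybox as a login shell: pack argv/envp into
                 * procinfo, load the ELF image, mark the proc RUNNABLE_S, and
                 * run it. */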
                char* argv[] = {"/bin/sh","-l",0};
                char* envp[] = {"LD_LIBRARY_PATH=/lib",0};
                procinfo_pack_args(p->procinfo,argv,envp);

                struct file* f = file_open("/bin/busybox",0,0);
                assert(f != NULL);
                assert(load_elf(p,f) == 0);
                file_decref(f);

                __proc_set_state(p, PROC_RUNNABLE_S);
                proc_run(p);
        }
        schedule();
}

void manager_pearce()
{
        static struct proc *envs[256];
        static volatile uint8_t progress = 0;

        if (progress == 0) {
                progress++;
                envs[0] = kfs_proc_create(kfs_lookup_path("parlib_httpserver_integrated"));
                //envs[0] = kfs_proc_create(kfs_lookup_path("parlib_lock_test"));
                __proc_set_state(envs[0], PROC_RUNNABLE_S);
                proc_run(envs[0]);
        }
        schedule();

        panic("DON'T PANIC");

}

#ifdef __CONFIG_OSDI__
/* Manager for Micro benchmarks, OSDI, etc */
struct proc *mgr_p1 = 0;
struct proc *mgr_p2 = 0;
static void exper_1_part2(struct proc **pp);
static void exper_2_part2(struct proc **pp);
static void exper_3_part2(struct proc **pp);
static void exper_4_part2(struct proc **pp);
static void exper_5_part2(struct proc **pp);
static void exper_6_part2(struct proc **pp);
static void exper_7_part2(struct proc **pp);
static void exper_8_part2(struct proc **pp);
static void exper_9_part2(struct proc **pp);

void manager_tests(void)
{
        static uint8_t RACY progress = 0;

        printk("Test Progress: %d\n", progress);
        /* 10 runs of every experiment.  Finishing/part2 is harmless on a null
         * pointer.  We need to clean up / finish / part2 after each
         * quick_proc_run, since we drop into the monitor and only re-enter
         * here on another run (with progress++).  That's why each
         * experiment's part2 is also run in the first case of the next
         * experiment. */
        switch (progress++) {
                /* Experiment 1: get max vcores */
                case 0:
                        printk("************* Starting experiment 1 ************** \n");
                case 1:
                case 2:
                case 3:
                case 4:
                case 5:
                case 6:
                case 7:
                case 8:
                case 9:
                        exper_1_part2(&mgr_p1);
                        quick_proc_run("msr_get_cores", mgr_p1);
                        break;
                /* Experiment 2: get a single vcore */
                case 10:
                        exper_1_part2(&mgr_p1);
                        printk("************* Starting experiment 2 ************** \n");
                case 11:
                case 12:
                case 13:
                case 14:
                case 15:
                case 16:
                case 17:
                case 18:
                case 19:
                        exper_2_part2(&mgr_p1);
                        quick_proc_run("msr_get_singlecore", mgr_p1);
                        break;
                /* Experiment 3: kill a _M */
                case 20: /* leftover from exp 2 */
                        exper_2_part2(&mgr_p1);
                        printk("************* Starting experiment 3 ************** \n");
                case 21:
                case 22:
                case 23:
                case 24:
                case 25:
                case 26:
                case 27:
                case 28:
                case 29:
                        exper_3_part2(&mgr_p1);
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                /* Experiment 4: _S create and death */
                case 30: /* leftover from exp 3 */
                        exper_3_part2(&mgr_p1);
                        printk("************* Starting experiment 4 ************** \n");
                case 31:
                case 32:
                case 33:
                case 34:
                case 35:
                case 36:
                case 37:
                case 38:
                case 39:
                        exper_4_part2(&mgr_p1);
                        printk("[T]:004:S:%llu\n", read_tsc());
                        quick_proc_run("tsc_spitter", mgr_p1);
                        break;
                /* Experiment 5: raw preempt, entire process */
                case 40:
                        exper_4_part2(&mgr_p1);
                        printk("************* Starting experiment 5 ************** \n");
                case 41:
                case 42:
                case 43:
                case 44:
                case 45:
                case 46:
                case 47:
                case 48:
                case 49:
                        exper_5_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 6: preempt-warn, entire process */
                case 50:
                        exper_5_part2(&mgr_p1);
                        printk("************* Starting experiment 6 ************** \n");
                case 51:
                case 52:
                case 53:
                case 54:
                case 55:
                case 56:
                case 57:
                case 58:
                case 59:
                        exper_6_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 7: preempt-raw, single core */
                case 60:
                        exper_6_part2(&mgr_p1);
                        printk("************* Starting experiment 7 ************** \n");
                case 61:
                case 62:
                case 63:
                case 64:
                case 65:
                case 66:
                case 67:
                case 68:
                case 69:
                        exper_7_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 8: preempt-warn, single core */
                case 70:
                        exper_7_part2(&mgr_p1);
                        printk("************* Starting experiment 8 ************** \n");
                case 71:
                case 72:
                case 73:
                case 74:
                case 75:
                case 76:
                case 77:
                case 78:
                case 79:
                        exper_8_part2(&mgr_p1);
                        quick_proc_run("msr_nice_while", mgr_p1);
                        break;
                /* Experiment 9: single notification time */
                case 80:
                        exper_8_part2(&mgr_p1);
                        printk("************* Starting experiment 9 ************** \n");
                case 81:
                case 82:
                case 83:
                case 84:
                case 85:
                case 86:
                case 87:
                case 88:
                case 89:
                        exper_9_part2(&mgr_p1);
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                /* Experiment 10: cycling vcore */
                case 90:
                        exper_9_part2(&mgr_p1);
                        printk("************* Starting experiment 10 ************* \n");
                        quick_proc_run("msr_dumb_while", mgr_p1);
                        break;
                case 91:
                        quick_proc_run("msr_cycling_vcores", mgr_p2);
                        break;
                case 92:
                        printk("Will go on forever.  Udelaying for two minutes.\n");
                        udelay(120000000);
                        proc_incref(mgr_p1, 1);
                        proc_destroy(mgr_p1);
                        proc_decref(mgr_p1, 1);
                        proc_incref(mgr_p2, 1);
                        proc_destroy(mgr_p2);
                        proc_decref(mgr_p2, 1);
                        printk("Done with the tests!\n");
                        monitor(0);
                        break;
                default:
                        printd("Manager Progress: %d\n", progress);
                        schedule();
        }
        monitor(0);
        return;
}

/* OSDI experiment "bottom halves" */
/* Experiment 1: get max vcores */
static void exper_1_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 2: get a single vcore */
static void exper_2_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 3: kill a _M */
static void exper_3_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;

        if (*pp) { /* need to kill, etc */
                proc_incref(*pp, 1);
                begin = start_timing();
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                wmb();
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
                diff = stop_timing(begin);
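                /* diff is in raw TSC ticks; scale by 10^6/tsc_freq for usec
                 * and 10^9/tsc_freq for nsec, as in the printks below */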
                printk("Took %llu usec (%llu nsec) to kill.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:003:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
        }
}

/* Experiment 4: _S create and death */
static void exper_4_part2(struct proc **pp)
{
        while (*pp) /* make sure the previous run is over */
                cpu_relax();
}

/* Experiment 5: raw preempt, entire process */
static void exper_5_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        uint32_t end_refcnt = 0;
        bool self_ipi_pending = FALSE;

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                end_refcnt = (*pp)->env_refcnt - (*pp)->procinfo->num_vcores;
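                /* the test assumes each mapped vcore holds a proc ref; once
                 * they are all preempted, env_refcnt drops back to end_refcnt
                 * (the spin_on below waits for that) */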
                begin = start_timing();
                self_ipi_pending = __proc_preempt_all(*pp);
                spin_unlock(&(*pp)->proc_lock);
                __proc_kmsg_pending(*pp, self_ipi_pending);
                spin_on((*pp)->env_refcnt != end_refcnt);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to raw preempt all.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:005:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 6: preempt-warn, entire process */
static void exper_6_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                begin = start_timing();
                __proc_preempt_warnall(*pp, 1000000);
                spin_unlock(&(*pp)->proc_lock);
                spin_on((*pp)->procinfo->num_vcores > 1);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to warn preempt all.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:006:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 7: preempt-raw, single core */
static void exper_7_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        bool self_ipi_pending = FALSE;
        uint32_t pcoreid = 7; // some core available on all systems

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                assert((*pp)->procinfo->pcoremap[pcoreid].valid);
                begin = start_timing();
                self_ipi_pending = __proc_preempt_core(*pp, pcoreid);
                spin_unlock(&(*pp)->proc_lock);
                __proc_kmsg_pending(*pp, self_ipi_pending);
                spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to raw-preempt one core.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:007:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 8: preempt-warn, single core */
static void exper_8_part2(struct proc **pp)
{
        uint64_t begin = 0, diff = 0;
        uint32_t vcoreid, pcoreid = 7; // some core available on all systems

        if (*pp) {
                proc_incref(*pp, 1);
                spin_lock(&(*pp)->proc_lock);
                vcoreid = (*pp)->procinfo->pcoremap[pcoreid].vcoreid;
                assert((*pp)->procinfo->pcoremap[pcoreid].valid);
                begin = start_timing();
                __proc_preempt_warn(*pp, vcoreid, 1000000); // 1 sec
                spin_unlock(&(*pp)->proc_lock);
                spin_on((*pp)->procinfo->pcoremap[pcoreid].valid);
                diff = stop_timing(begin);
                printk("Took %llu usec (%llu nsec) to warn-preempt one core.\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                printk("[T]:008:%llu:%llu\n",
                       diff * 1000000 / system_timing.tsc_freq,
                       diff * 1000000000 / system_timing.tsc_freq);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

/* Experiment 9: single notification time */
static void exper_9_part2(struct proc **pp)
{
        struct notif_event ne = {0};

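        /* Timestamp the send side ([T]:009:B), fire a single NE_ALARM
         * notification at the proc, then tear it down like the other
         * experiments. */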
        if (*pp) {
                ne.ne_type = NE_ALARM;
                proc_incref(*pp, 1);
                printk("[T]:009:B:%llu\n", read_tsc());
                proc_notify(*pp, NE_ALARM, &ne);
                proc_destroy(*pp);
                proc_decref(*pp, 1);
                while (*pp) /* toggled in proc_free */
                        cpu_relax();
        }
}

#endif /* __CONFIG_OSDI__ */

#ifdef __sparc_v8__

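/* Minimal integer-to-string conversion for bases up to 16: handles a leading
 * '-' for negative values, emits digits least-significant first, then reverses
 * them in place.  Returns buf0 (or NULL if base > 16). */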
static char*
itoa(int num, char* buf0, size_t base)
{
        if(base > 16)
                return NULL;

        char* buf = buf0;
        int len = 0, i;

        if(num < 0)
        {
                *buf++ = '-';
                num = -num;
        }

        do {
                buf[len++] = "0123456789abcdef"[num%base];
                num /= base;
        } while(num);

        for(i = 0; i < len/2; i++)
        {
                char temp = buf[i];
                buf[i] = buf[len-i-1];
                buf[len-i-1] = temp;
        }
        buf[len] = 0;

        return buf0;
}

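/* Helpers for what appears to be a frame-based hardware partition scheduler
 * ("gsf"): each writes a configuration word (frame cycles, per-partition
 * credits, or a core's partition assignment) to a fixed register offset via an
 * alternate-space store. */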
void gsf_set_frame_cycles(int cycles)
{
        store_alternate(26*4,2,cycles);
}

void gsf_set_partition_credits(int partition, int credits)
{
        store_alternate((32+partition)*4,2,credits);
}

void gsf_set_core_partition(int core, int partition)
{
        store_alternate((64+core)*4,2,partition);
}

#endif