#include <ros/memlayout.h>
#include <ros/common.h>
#include <arch/trap.h>
#include <multiboot.h>
#include <page_alloc.h>
#include <hashtable.h>
#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)
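/* Exercises each IPI send path in turn: broadcast, all-but-self, self,
 * specific physical cores, and logical (group) destinations.  IRQs are
 * enabled around the sends so the test handler can actually run. */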
void test_ipi_sending(void)
{
	extern handler_t (CT(NUM_INTERRUPT_HANDLERS) RO interrupt_handlers)[];
	int8_t state = 0;

	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_hello_world_handler, NULL);
	enable_irqsave(&state);
	cprintf("\nCORE 0 sending broadcast\n");
	send_broadcast_ipi(I_TESTING);
	cprintf("\nCORE 0 sending all others\n");
	send_all_others_ipi(I_TESTING);
	cprintf("\nCORE 0 sending self\n");
	send_self_ipi(I_TESTING);
	cprintf("\nCORE 0 sending ipi to physical 1\n");
	send_ipi(get_hw_coreid(0x01), I_TESTING);
	cprintf("\nCORE 0 sending ipi to physical 2\n");
	send_ipi(get_hw_coreid(0x02), I_TESTING);
	cprintf("\nCORE 0 sending ipi to physical 3\n");
	send_ipi(get_hw_coreid(0x03), I_TESTING);
	cprintf("\nCORE 0 sending ipi to physical 15\n");
	send_ipi(get_hw_coreid(0x0f), I_TESTING);
	cprintf("\nCORE 0 sending ipi to logical 2\n");
	send_group_ipi(0x02, I_TESTING);
	cprintf("\nCORE 0 sending ipi to logical 1\n");
	send_group_ipi(0x01, I_TESTING);
	disable_irqsave(&state);
}
// Note this never returns and will muck with any other timer work
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100, TIMER_RATEGEN); // totally arbitrary time
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while (1);
}
void test_ioapic_pit_reroute(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	ioapic_route_irq(0, 3);

	cprintf("Starting pit on core 3....\n");
	pit_set_timer(0xFFFE, TIMER_RATEGEN); // totally arbitrary time
	ioapic_unroute_irq(0);
	cprintf("Masked pit. Waiting before return...\n");
}
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
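/* A page's "color" is the set of cache lines it can occupy; allocating by
 * color partitions a physically-indexed cache among users.  The test below
 * dumps the cache geometry, drains every color through each level's
 * allocator, then exercises the global allocator and page_free(). */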
void test_page_coloring(void)
{
	// Print the different cache properties of our machine
	print_cache_properties("L1", l1);
	print_cache_properties("L2", l2);
	print_cache_properties("L3", l3);

	// Print some stats about our memory
	cprintf("Max Address: %llu\n", MAX_VADDR);
	cprintf("Num Pages: %u\n", npages);

	// Declare a local variable for allocating pages
	page_t *page;

	cprintf("Contents of the page free list:\n");
	for (int i = 0; i < llc_cache->num_colors; i++) {
		cprintf("  COLOR %d:\n", i);
		LIST_FOREACH(page, &colored_page_free_list[i], pg_link) {
			cprintf("    Page: %d\n", page2ppn(page));
		}
	}

	// Run through and allocate all pages through l1_page_alloc
	cprintf("Allocating from L1 page colors:\n");
	for (int i = 0; i < get_cache_num_page_colors(l1); i++) {
		cprintf("  COLOR %d:\n", i);
		while (colored_page_alloc(l1, &page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	// Put all the pages back by reinitializing
	page_init();

	// Run through and allocate all pages through l2_page_alloc
	cprintf("Allocating from L2 page colors:\n");
	for (int i = 0; i < get_cache_num_page_colors(l2); i++) {
		cprintf("  COLOR %d:\n", i);
		while (colored_page_alloc(l2, &page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	// Put all the pages back by reinitializing
	page_init();

	// Run through and allocate all pages through l3_page_alloc
	cprintf("Allocating from L3 page colors:\n");
	for (int i = 0; i < get_cache_num_page_colors(l3); i++) {
		cprintf("  COLOR %d:\n", i);
		while (colored_page_alloc(l3, &page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	// Put all the pages back by reinitializing
	page_init();

	// Run through and allocate all pages through page_alloc
	cprintf("Allocating from global allocator:\n");
	while (upage_alloc(&page) != -ENOMEM)
		cprintf("    Page: %d\n", page2ppn(page));

	if (colored_page_alloc(l2, &page, 0) != -ENOMEM)
		cprintf("Should not get here, all pages should already be gone!\n");
	cprintf("All pages gone for sure...\n");

	// Now let's put a few pages back using page_free..
	cprintf("Reinserting pages via page_free and reallocating them...\n");
	page_free(&pages[0]);
	page_free(&pages[15]);
	page_free(&pages[7]);
	page_free(&pages[6]);
	page_free(&pages[4]);

	while (upage_alloc(&page) != -ENOMEM)
		cprintf("Page: %d\n", page2ppn(page));
}
void test_color_alloc(void)
{
	size_t checkpoint = 0;
	uint8_t *colors_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
	cache_color_alloc(l2, colors_map);
	cache_color_alloc(l3, colors_map);
	cache_color_alloc(l3, colors_map);
	cache_color_alloc(l2, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(l2, colors_map);
	cache_color_free(llc_cache, colors_map);
	cache_color_free(llc_cache, colors_map);

	printk("L1 free colors, tot colors: %d\n", l1->num_colors);
	PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
	printk("L2 free colors, tot colors: %d\n", l2->num_colors);
	PRINT_BITMASK(l2->free_colors_map, l2->num_colors);
	printk("L3 free colors, tot colors: %d\n", l3->num_colors);
	PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
	printk("Process allocated colors\n");
	PRINT_BITMASK(colors_map, llc_cache->num_colors);
	printk("test_color_alloc() complete!\n");
}
barrier_t test_cpu_array;

void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
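/* enable_irqsave()/disable_irqsave() nest: the int8_t state argument tracks
 * the depth and the state to restore, so interrupts only truly toggle at the
 * outermost pair.  Each block below checks one ordering; printd shows the
 * EFLAGS IF values (0 or 200) listed in the "Done" lines. */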
void test_interrupts_irqsave(void)
{
	int8_t state = 0;

	printd("Testing Nesting Enabling first, turning ints off:\n");
	disable_irq();
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Enabling IRQSave\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Done. Should have been 0, 200, 200, 200, 0\n");

	printd("Testing Nesting Disabling first, turning ints on:\n");
	enable_irq();
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Once\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Done. Should have been 200, 0, 0, 0, 200\n");

	disable_irq();
	printd("Ints are off, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Done. Should have been 200, 0\n");

	enable_irq();
	printd("Ints are on, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Done. Should have been 200, 200\n");

	disable_irq();
	printd("Ints are off, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	printd("Done. Should have been 0, 0\n");

	enable_irq();
	printd("Ints are on, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", irq_is_enabled());
	assert(irq_is_enabled());
	printd("Done. Should have been 0, 200\n");

	disable_irq();
	cprintf("Passed enable_irqsave tests\n");
}
void test_bitmasks(void)
{
#define masksize 67	/* arbitrary non-multiple of 8, to hit the partial byte */
	DECL_BITMASK(mask, masksize);
	printk("size of mask %d\n", sizeof(mask));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	SET_BITMASK_BIT(mask, 0);
	SET_BITMASK_BIT(mask, 11);
	SET_BITMASK_BIT(mask, 17);
	SET_BITMASK_BIT(mask, masksize-1);
	printk("bits set\n");
	PRINT_BITMASK(mask, masksize);
	DECL_BITMASK(mask2, masksize);
	COPY_BITMASK(mask2, mask, masksize);
	printk("copy of original mask, should be the same as the prev\n");
	PRINT_BITMASK(mask2, masksize);
	CLR_BITMASK_BIT(mask, 11);
	printk("11 cleared\n");
	PRINT_BITMASK(mask, masksize);
	printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
	printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
	FILL_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("should be all 1's, except for a few at the end\n");
	printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask, masksize));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask, masksize));
	printk("should be cleared\n");
}
checklist_t *RO the_global_list;

void test_checklist_handler(trapframe_t *tf, void *data)
{
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}

void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");
}
atomic_t a, b, c;

#ifdef __IVY__
void test_incrementer_handler(trapframe_t *tf, atomic_t *data)
#else
void test_incrementer_handler(trapframe_t *tf, void *data)
#endif
{
	assert(data);
	atomic_inc(data);
}

void test_null_handler(trapframe_t *tf, void *data)
{
}

void test_smp_call_functions(void)
{
	int i;
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();

	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for (i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for (i = 1; i < num_cpus; i++) {
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");
}
void test_lapic_status_bit(void)
{
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_incrementer_handler, &a);
#define NUM_IPI 100000
	atomic_init(&a, 0);
	printk("IPIs received (should be 0): %d\n", a);
	for (int i = 0; i < NUM_IPI; i++) {
		send_ipi(get_hw_coreid(7), I_TESTING);
		lapic_wait_to_send();
	}
	// need to wait a bit to let those IPIs get there
	udelay(5000000);
	printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
	// hopefully that handler never fires again. leaving it registered for now.
}
/************************************************************/
/* ISR Handler Functions */

void test_hello_world_handler(trapframe_t *tf, void *data)
{
	int trapno;
	#if defined(__i386__)
	trapno = tf->tf_trapno;
	#elif defined(__sparc_v8__)
	trapno = (tf->tbr >> 4) & 0xFF;
	#else
	trapno = 0;
	#endif

	cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
	        trapno, core_id(), tf);
}
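/* Serializes the per-core info dumps below so lines don't interleave. */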
spinlock_t print_info_lock = SPINLOCK_INITIALIZER;

void test_print_info_handler(trapframe_t *tf, void *data)
{
	uint64_t tsc = read_tsc();

	spin_lock_irqsave(&print_info_lock);
	cprintf("----------------------------\n");
	cprintf("This is Core %d\n", core_id());
	cprintf("Timestamp = %lld\n", tsc);
#ifdef __i386__
	cprintf("Hardware core %d\n", hw_core_id());
	cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
	cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x200), read_msr(0x201));
	cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x202), read_msr(0x203));
	cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x204), read_msr(0x205));
	cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x206), read_msr(0x207));
	cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x208), read_msr(0x209));
	cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20a), read_msr(0x20b));
	cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20c), read_msr(0x20d));
	cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20e), read_msr(0x20f));
#endif /* __i386__ */
	cprintf("----------------------------\n");
	spin_unlock_irqsave(&print_info_lock);
}
void test_barrier_handler(trapframe_t *tf, void *data)
{
	cprintf("Round 1: Core %d\n", core_id());
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	cprintf("Round 2: Core %d\n", core_id());
	waiton_barrier(&test_cpu_array);
	cprintf("Round 3: Core %d\n", core_id());
	// uncomment to see it fucked up
	//cprintf("Round 4: Core %d\n", core_id());
}

#ifdef __IVY__
static void test_waiting_handler(trapframe_t *tf, atomic_t *data)
#else
static void test_waiting_handler(trapframe_t *tf, void *data)
#endif
{
	atomic_dec(data);
}

void test_pit(void)
{
	cprintf("Starting test for PIT now (10s)\n");
	udelay_pit(10000000);
	cprintf("End now\n");
	cprintf("Starting test for TSC (if stable) now (10s)\n");
	udelay(10000000);
	cprintf("End now\n");

	cprintf("Starting test for LAPIC (if stable) now (10s)\n");
	enable_irq();
	lapic_set_timer(10000000, FALSE);

	atomic_t waiting;
	atomic_init(&waiting, 1);
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_waiting_handler, &waiting);
	while (atomic_read(&waiting))
		cpu_relax();
	cprintf("End now\n");
}

void test_circ_buffer(void)
{
	int arr[5] = {0, 1, 2, 3, 4};

	for (int i = 0; i < 5; i++) {
		FOR_CIRC_BUFFER(i, 5, j)
			printk("Starting with current = %d, each value = %d\n", i, j);
	}
}
void test_km_handler(struct trapframe *tf, uint32_t srcid, long a0, long a1,
                     long a2)
{
	printk("Received KM on core %d from core %d: arg0 = 0x%08x, arg1 = "
	       "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);
}

void test_kernel_messages(void)
{
	printk("Testing Kernel Messages\n");
	/* Testing sending multiples, sending different types, alternating, and
	 * precedence (the immediates should trump the others) */
	printk("sending 5 IMMED to core 1, sending (#,deadbeef,0)\n");
	for (int i = 0; i < 5; i++)
		send_kernel_message(1, test_km_handler, (long)i, 0xdeadbeef, 0,
		                    KMSG_IMMEDIATE);
	udelay(5000000);
	printk("sending 5 routine to core 1, sending (#,cafebabe,0)\n");
	for (int i = 0; i < 5; i++)
		send_kernel_message(1, test_km_handler, (long)i, 0xcafebabe, 0,
		                    KMSG_ROUTINE);
	udelay(5000000);
	printk("sending 10 routine and 3 immediate to core 2\n");
	for (int i = 0; i < 10; i++)
		send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
		                    KMSG_ROUTINE);
	for (int i = 0; i < 3; i++)
		send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
		                    KMSG_IMMEDIATE);
	udelay(5000000);
	printk("sending 5 ea alternating to core 2\n");
	for (int i = 0; i < 5; i++) {
		send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
		                    KMSG_IMMEDIATE);
		send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
		                    KMSG_ROUTINE);
	}
	udelay(5000000);
}
static void test_single_cache(int iters, size_t size, int align, int flags,
                              void (*ctor)(void *, size_t),
                              void (*dtor)(void *, size_t))
{
	struct kmem_cache *test_cache;
	void *objects[iters];

	test_cache = kmem_cache_create("test_cache", size, align, flags, ctor, dtor);
	printk("Testing Kmem Cache:\n");
	print_kmem_cache(test_cache);
	for (int i = 0; i < iters; i++) {
		objects[i] = kmem_cache_alloc(test_cache, 0);
		printk("Buffer %d addr = %p\n", i, objects[i]);
	}
	for (int i = 0; i < iters; i++) {
		kmem_cache_free(test_cache, objects[i]);
	}
	kmem_cache_destroy(test_cache);
}

void a_ctor(void *buf, size_t size)
{
	printk("constructin tests\n");
}

void a_dtor(void *buf, size_t size)
{
	printk("destructin tests\n");
}

void test_slab(void)
{
	test_single_cache(10, 128, 512, 0, 0, 0);
	test_single_cache(10, 128, 4, 0, a_ctor, a_dtor);
	test_single_cache(10, 1024, 16, 0, 0, 0);
}
void test_kmalloc(void)
{
	printk("Testing Kmalloc\n");
	void *bufs[NUM_KMALLOC_CACHES + 1];
	size_t size;

	for (int i = 0; i < NUM_KMALLOC_CACHES + 1; i++) {
		size = (KMALLOC_SMALLEST << i) - KMALLOC_OFFSET;
		bufs[i] = kmalloc(size, 0);
		printk("Size %d, Addr = %p\n", size, bufs[i]);
	}
	for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
		printk("Freeing buffer %d\n", i);
		kfree(bufs[i]);
	}
	printk("Testing a large kmalloc\n");
	size = (KMALLOC_LARGEST << 2);
	bufs[0] = kmalloc(size, 0);
	printk("Size %d, Addr = %p\n", size, bufs[0]);
	kfree(bufs[0]);
}
static size_t test_hash_fn_col(void *k)
{
	return (size_t)k % 2; // collisions in slots 0 and 1
}
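/* Runs the hashtable through single-item and bulk insert/search/remove,
 * first with the generic hash, then with test_hash_fn_col to cram every key
 * into two buckets and stress chained collisions. */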
void test_hashtable(void)
{
	struct test {int x; int y;};
	struct test tstruct[10];

	struct hashtable *h;
	uintptr_t k = 5;
	struct test *v = &tstruct[0];

	h = create_hashtable(32, __generic_hash, __generic_eq);

	// test inserting one item, then finding it again
	printk("Testing one item, insert, search, and removal\n");
	if (!hashtable_insert(h, (void*)k, v))
		printk("Failed to insert to hashtable!\n");
	v = NULL;
	if (!(v = hashtable_search(h, (void*)k)))
		printk("Failed to find in hashtable!\n");
	if (v != &tstruct[0])
		printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[0]);
	v = NULL;
	if (!(v = hashtable_remove(h, (void*)k)))
		printk("Failed to remove from hashtable!\n");
	// shouldn't be able to find it again
	if ((v = hashtable_search(h, (void*)k)))
		printk("Should not have been able to find in hashtable!\n");

	printk("Testing a bunch of items, insert, search, and removal\n");
	for (int i = 0; i < 10; i++) {
		k = i; // vary the key, we don't do KEY collisions
		if (!hashtable_insert(h, (void*)k, &tstruct[i]))
			printk("Failed to insert iter %d to hashtable!\n", i);
	}
	// read out the 10 items
	for (int i = 0; i < 10; i++) {
		k = i;
		if (!(v = hashtable_search(h, (void*)k)))
			printk("Failed to find in hashtable!\n");
		if (v != &tstruct[i])
			printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[i]);
	}
	if (hashtable_count(h) != 10)
		printk("Wrong accounting of number of elements!\n");
	// remove the 10 items
	for (int i = 0; i < 10; i++) {
		k = i;
		if (!(v = hashtable_remove(h, (void*)k)))
			printk("Failed to remove from hashtable!\n");
	}
	// make sure they are all gone
	for (int i = 0; i < 10; i++) {
		k = i;
		if ((v = hashtable_search(h, (void*)k)))
			printk("Should not have been able to find in hashtable!\n");
	}
	if (hashtable_count(h))
		printk("Wrong accounting of number of elements!\n");
	hashtable_destroy(h);

	// same test of a bunch of items, but with collisions.
	printk("Testing a bunch of items with collisions, etc.\n");
	h = create_hashtable(32, test_hash_fn_col, __generic_eq);
	for (int i = 0; i < 10; i++) {
		k = i; // vary the key, we don't do KEY collisions
		if (!hashtable_insert(h, (void*)k, &tstruct[i]))
			printk("Failed to insert iter %d to hashtable!\n", i);
	}
	// read out the 10 items
	for (int i = 0; i < 10; i++) {
		k = i;
		if (!(v = hashtable_search(h, (void*)k)))
			printk("Failed to find in hashtable!\n");
		if (v != &tstruct[i])
			printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[i]);
	}
	if (hashtable_count(h) != 10)
		printk("Wrong accounting of number of elements!\n");
	// remove the 10 items
	for (int i = 0; i < 10; i++) {
		k = i;
		if (!(v = hashtable_remove(h, (void*)k)))
			printk("Failed to remove from hashtable!\n");
	}
	// make sure they are all gone
	for (int i = 0; i < 10; i++) {
		k = i;
		if ((v = hashtable_search(h, (void*)k)))
			printk("Should not have been able to find in hashtable!\n");
	}
	if (hashtable_count(h))
		printk("Wrong accounting of number of elements!\n");
	hashtable_destroy(h);
}
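/* A BCQ (bounded concurrent queue) is a fixed-size ring with separate
 * producer and consumer indexes; enqueue fails rather than blocks once the
 * ring is full, which the overflow loops below rely on. */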
/* Ghetto test, only tests one prod or consumer at a time */
void test_bcq(void)
{
	/* Tests a basic struct */
	struct my_struct {
		int x;
		int y;
	};
	struct my_struct in_struct, out_struct;

	DEFINE_BCQ_TYPES(test, struct my_struct, 16);
	struct test_bcq t_bcq;
	bcq_init(&t_bcq, struct my_struct, 16);

	in_struct.x = 4;	/* arbitrary test values */
	in_struct.y = 5;
	bcq_enqueue(&t_bcq, &in_struct, 16, 5);
	bcq_dequeue(&t_bcq, &out_struct, 16);
	printk("out x %d. out y %d\n", out_struct.x, out_struct.y);

	/* Tests the BCQ a bit more, esp with overflow */
	#define NR_ELEM_A_BCQ 8 /* NOTE: this must be a power of 2! */
	DEFINE_BCQ_TYPES(my, int, NR_ELEM_A_BCQ);
	struct my_bcq a_bcq;
	bcq_init(&a_bcq, int, NR_ELEM_A_BCQ);

	int y = 2;
	int output[100];
	int retval[100];

	/* Helpful debugger */
	void print_a_bcq(struct my_bcq *bcq)
	{
		printk("A BCQ (made of ints): %08p\n", bcq);
		printk("\tprod_idx: %08p\n", bcq->hdr.prod_idx);
		printk("\tcons_pub_idx: %08p\n", bcq->hdr.cons_pub_idx);
		printk("\tcons_pvt_idx: %08p\n", bcq->hdr.cons_pvt_idx);
		for (int i = 0; i < NR_ELEM_A_BCQ; i++) {
			printk("Element %d, rdy_for_cons: %02p\n", i,
			       bcq->wraps[i].rdy_for_cons);
		}
	}

	/* Put in more than it can take */
	for (int i = 0; i < 15; i++) {
		y = i;
		retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
		printk("enqueued: %d, had retval %d\n", y, retval[i]);
	}
	//print_a_bcq(&a_bcq);

	/* Try to dequeue more than we put in */
	for (int i = 0; i < 15; i++) {
		retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
		printk("dequeued: %d with retval %d\n", output[i], retval[i]);
	}
	//print_a_bcq(&a_bcq);

	/* Put in some it should be able to take */
	for (int i = 0; i < 3; i++) {
		y = i;
		retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
		printk("enqueued: %d, had retval %d\n", y, retval[i]);
	}

	/* Take those, and then a couple extra */
	for (int i = 0; i < 5; i++) {
		retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
		printk("dequeued: %d with retval %d\n", output[i], retval[i]);
	}

	/* Try some one-for-one */
	for (int i = 0; i < 5; i++) {
		y = i;
		retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
		printk("enqueued: %d, had retval %d\n", y, retval[i]);
		retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
		printk("dequeued: %d with retval %d\n", output[i], retval[i]);
	}
}
/* Test a simple concurrent send and receive (one prod, one cons). We spawn a
 * process that will go into _M mode on another core, and we'll do the test from
 * an alarm handler run on our core. When we start up the process, we won't
 * return so we need to defer the work with an alarm. */
void test_ucq(void)
{
	struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
	struct alarm_waiter *waiter = kmalloc(sizeof(struct alarm_waiter), 0);

	/* Alarm handler: what we want to do after the process is up */
	void send_msgs(struct alarm_waiter *waiter)
	{
		struct timer_chain *tchain;
		struct proc *old_proc, *p = waiter->data;
		struct ucq *ucq = (struct ucq*)USTACKTOP;
		struct event_msg msg;

		printk("Running the alarm handler!\n");
		printk("NR msg per page: %d\n", NR_MSG_PER_PAGE);
		/* might not be mmaped yet, if not, abort */
		if (!user_mem_check(p, ucq, PGSIZE, 1, PTE_USER_RW)) {
			printk("Not mmaped yet\n");
			goto abort;
		}
		/* load their address space */
		old_proc = switch_to(p);
		/* So it's mmaped, see if it is ready (note that this is dangerous) */
		if (!ucq->ucq_ready) {
			printk("Not ready yet\n");
			switch_back(p, old_proc);
			goto abort;
		}
		/* So it's ready, time to finally do the tests... */
		printk("[kernel] Finally starting the tests...\n");
		/* 1: Send a simple message */
		printk("[kernel] #1 Sending simple message (7, deadbeef)\n");
		msg.ev_type = 7;
		msg.ev_arg2 = 0xdeadbeef;
		send_ucq_msg(ucq, p, &msg);
		printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
		/* 2: Send a bunch. In a VM, this causes one swap, and then a bunch of
		 * mmaps. */
		printk("[kernel] #2\n");
		for (int i = 0; i < 5000; i++) {
			msg.ev_type = i;
			send_ucq_msg(ucq, p, &msg);
		}
		printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
		printk("[kernel] #3\n");
		/* 3: make sure we chained pages (assuming 1k is enough) */
		for (int i = 0; i < 1000; i++) {
			msg.ev_type = i;
			send_ucq_msg(ucq, p, &msg);
		}
		printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
		/* other things we could do:
		 * - concurrent producers / consumers... ugh.
		 * - would require a kmsg to another core, instead of a local alarm */
		/* done, switch back and free things */
		switch_back(p, old_proc);
		kfree(waiter); /* since it was kmalloc()d */
		return;
abort:
		tchain = &per_cpu_info[core_id()].tchain;
		/* Set to run again */
		set_awaiter_rel(waiter, 1000000);
		set_alarm(tchain, waiter);
	}
	/* Set up a handler to run the real part of the test */
	init_awaiter(waiter, send_msgs);
	set_awaiter_rel(waiter, 1000000); /* 1s should be long enough */
	set_alarm(tchain, waiter);
	/* Just spawn the program */
	struct file *program;
	program = do_file_open("/bin/ucq", 0, 0);
	if (!program) {
		printk("Unable to find /bin/ucq!\n");
		return;
	}
	char *p_envp[] = {"LD_LIBRARY_PATH=/lib", 0};
	struct proc *p = proc_create(program, 0, p_envp);
	/* instead of getting rid of the reference created in proc_create, we'll put
	 * it in the awaiter */
	waiter->data = p;
	kref_put(&program->f_kref);
	/* Should never return from schedule (env_pop in there) also note you may
	 * not get the process you created, in the event there are others floating
	 * around that are runnable */
	schedule();
	assert(0);
}
/* rudimentary tests. does the basics, create, merge, split, etc. Feel free to
 * add more, esp for the error conditions and finding free slots. This is also
 * a bit lazy with setting the caller's fields (perm, flags, etc). */
void test_vm_regions(void)
{
	#define MAX_VMR_TESTS 10
	struct proc pr, *p = &pr;	/* too lazy to even create one */
	TAILQ_INIT(&p->vm_regions);

	struct vmr_summary {
		uintptr_t base;
		uintptr_t end;
	};
	int check_vmrs(struct proc *p, struct vmr_summary *results, int len, int n)
	{
		int count = 0;
		struct vm_region *vmr;
		TAILQ_FOREACH(vmr, &p->vm_regions, vm_link) {
			if (count >= len) {
				printk("More vm_regions than expected\n");
				break;
			}
			if ((vmr->vm_base != results[count].base) ||
			    (vmr->vm_end != results[count].end)) {
				printk("VM test case %d failed!\n", n);
			}
			count++;
		}
		return count;
	}

	struct vm_region *vmrs[MAX_VMR_TESTS];
	struct vmr_summary results[MAX_VMR_TESTS];
	int n = 0;

	memset(results, 0, sizeof(results));
	/* Make one */
	vmrs[0] = create_vmr(p, 0x2000, 0x1000);
	results[0].base = 0x2000;
	results[0].end = 0x3000;
	check_vmrs(p, results, 1, n++);
	/* Grow it */
	grow_vmr(vmrs[0], 0x4000);
	results[0].base = 0x2000;
	results[0].end = 0x4000;
	check_vmrs(p, results, 1, n++);
	/* Grow it poorly */
	if (-1 != grow_vmr(vmrs[0], 0x3000))
		printk("Bad grow test failed\n");
	check_vmrs(p, results, 1, n++);
	/* Make another right next to it */
	vmrs[1] = create_vmr(p, 0x4000, 0x1000);
	results[1].base = 0x4000;
	results[1].end = 0x5000;
	check_vmrs(p, results, 2, n++);
	/* try to grow through it */
	if (-1 != grow_vmr(vmrs[0], 0x5000))
		printk("Bad grow test failed\n");
	check_vmrs(p, results, 2, n++);
	/* Merge them */
	merge_vmr(vmrs[0], vmrs[1]);
	results[0].end = 0x5000;
	results[1].base = 0;
	results[1].end = 0;
	check_vmrs(p, results, 1, n++);
	vmrs[1] = create_vmr(p, 0x6000, 0x4000);
	results[1].base = 0x6000;
	results[1].end = 0xa000;
	check_vmrs(p, results, 2, n++);
	/* try to merge unmergables (just testing ranges) */
	if (-1 != merge_vmr(vmrs[0], vmrs[1]))
		printk("Bad merge test failed\n");
	check_vmrs(p, results, 2, n++);
	/* Split the second one */
	vmrs[2] = split_vmr(vmrs[1], 0x8000);
	results[1].end = 0x8000;
	results[2].base = 0x8000;
	results[2].end = 0xa000;
	check_vmrs(p, results, 3, n++);
	/* Destroy the middle one */
	destroy_vmr(vmrs[1]);
	results[1].base = 0x8000;
	results[1].end = 0xa000;
	check_vmrs(p, results, 2, n++);
	/* Shrink the last one */
	shrink_vmr(vmrs[2], 0x9000);
	results[1].base = 0x8000;
	results[1].end = 0x9000;
	check_vmrs(p, results, 2, n++); /* 10 */
	if (vmrs[2] != find_vmr(p, 0x8500))
		printk("Failed to find the right vmr!\n");
	if (vmrs[2] != find_first_vmr(p, 0x8500))
		printk("Failed to find the right vmr!\n");
	if (vmrs[2] != find_first_vmr(p, 0x7500))
		printk("Failed to find the right vmr!\n");
	if (find_first_vmr(p, 0x9500))
		printk("Found a vmr when we shouldn't!\n");
	/* grow up to another */
	grow_vmr(vmrs[0], 0x8000);
	results[0].end = 0x8000;
	check_vmrs(p, results, 2, n++);
	vmrs[0]->vm_prot = 88;
	vmrs[2]->vm_prot = 77;
	/* should be unmergeable due to perms */
	if (-1 != merge_vmr(vmrs[0], vmrs[2]))
		printk("Bad merge test failed\n");
	check_vmrs(p, results, 2, n++);
	/* should merge now */
	vmrs[2]->vm_prot = 88;
	merge_vmr(vmrs[0], vmrs[2]);
	results[0].end = 0x9000;
	check_vmrs(p, results, 1, n++);
	destroy_vmr(vmrs[0]);
	check_vmrs(p, results, 0, n++);
	/* Check the automerge function */
	vmrs[0] = create_vmr(p, 0x2000, 0x1000);
	vmrs[1] = create_vmr(p, 0x3000, 0x1000);
	vmrs[2] = create_vmr(p, 0x4000, 0x1000);
	for (int i = 0; i < 3; i++) {
		vmrs[i]->vm_prot = PROT_READ;
		vmrs[i]->vm_flags = 0;
		vmrs[i]->vm_file = 0; /* would like to test this, it's a pain for now */
	}
	vmrs[0] = merge_me(vmrs[1]);
	results[0].base = 0x2000;
	results[0].end = 0x5000;
	check_vmrs(p, results, 1, n++);
	destroy_vmr(vmrs[0]);
	check_vmrs(p, results, 0, n++);
	/* Check unfixed creation requests */
	vmrs[0] = create_vmr(p, 0x0000, 0x1000);
	vmrs[1] = create_vmr(p, 0x0000, 0x1000);
	vmrs[2] = create_vmr(p, 0x0000, 0x1000);
	results[0].base = 0x0000;
	results[0].end = 0x1000;
	results[1].base = 0x1000;
	results[1].end = 0x2000;
	results[2].base = 0x2000;
	results[2].end = 0x3000;
	check_vmrs(p, results, 3, n++);

	printk("Finished vm_regions test!\n");
}
void test_radix_tree(void)
{
	struct radix_tree real_tree = RADIX_INITIALIZER;
	struct radix_tree *tree = &real_tree;
	void *retval;

	if (radix_insert(tree, 0, (void*)0xdeadbeef))
		printk("Failed to insert at 0!\n");
	radix_delete(tree, 0);
	if (radix_insert(tree, 0, (void*)0xdeadbeef))
		printk("Failed to re-insert at 0!\n");

	if (radix_insert(tree, 3, (void*)0xdeadbeef))
		printk("Failed to insert first!\n");
	radix_insert(tree, 4, (void*)0x04040404);
	assert((void*)0xdeadbeef == radix_lookup(tree, 3));
	for (int i = 5; i < 100; i++)
		if ((retval = radix_lookup(tree, i))) {
			printk("Extra item %08p at slot %d in tree %08p\n", retval, i,
			       tree);
			print_radix_tree(tree);
		}
	if (radix_insert(tree, 65, (void*)0xcafebabe))
		printk("Failed to insert a two-tier!\n");
	if (!radix_insert(tree, 4, (void*)0x03030303))
		printk("Should not let us reinsert\n");
	if (radix_insert(tree, 4095, (void*)0x4095))
		printk("Failed to insert a two-tier boundary!\n");
	if (radix_insert(tree, 4096, (void*)0x4096))
		printk("Failed to insert a three-tier!\n");
	//print_radix_tree(tree);
	radix_delete(tree, 65);
	radix_delete(tree, 3);
	radix_delete(tree, 4);
	radix_delete(tree, 4095);
	radix_delete(tree, 4096);
	//print_radix_tree(tree);
	printk("Finished radix tree tests!\n");
}
/* Assorted FS tests, which were hanging around in init.c */
void test_random_fs(void)
{
	int retval = do_symlink("/dir1/sym", "/bin/hello", S_IRWXU);
	if (retval)
		printk("symlink1 creation failed\n");
	retval = do_symlink("/symdir", "/dir1/dir1-1", S_IRWXU);
	if (retval)
		printk("symlink2 creation failed\n");
	retval = do_symlink("/dir1/test.txt", "/dir2/test2.txt", S_IRWXU);
	if (retval)
		printk("symlink3 creation failed\n");
	retval = do_symlink("/dir1/dir1-1/up", "../../", S_IRWXU);
	if (retval)
		printk("symlink4 creation failed\n");
	retval = do_symlink("/bin/hello-sym", "hello", S_IRWXU);
	if (retval)
		printk("symlink5 creation failed\n");

	struct dentry *dentry;
	struct nameidata nd_r = {0}, *nd = &nd_r;
	retval = path_lookup("/dir1/sym", 0, nd);
	if (retval)
		printk("symlink lookup failed: %d\n", retval);
	char *symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
	printk("Pathlookup got %s (sym)\n", nd->dentry->d_name.name);
	if (!symname)
		printk("symlink reading failed\n");
	else
		printk("Symname: %s (/bin/hello)\n", symname);
	path_release(nd);

	/* try with follow */
	memset(nd, 0, sizeof(struct nameidata));
	retval = path_lookup("/dir1/sym", LOOKUP_FOLLOW, nd);
	if (retval)
		printk("symlink lookup failed: %d\n", retval);
	printk("Pathlookup got %s (hello)\n", nd->dentry->d_name.name);
	path_release(nd);

	/* try with a directory */
	memset(nd, 0, sizeof(struct nameidata));
	retval = path_lookup("/symdir/f1-1.txt", 0, nd);
	if (retval)
		printk("symlink lookup failed: %d\n", retval);
	printk("Pathlookup got %s (f1-1.txt)\n", nd->dentry->d_name.name);
	path_release(nd);

	/* try with a rel path */
	printk("Try with a rel path\n");
	memset(nd, 0, sizeof(struct nameidata));
	retval = path_lookup("/symdir/up/hello.txt", 0, nd);
	if (retval)
		printk("symlink lookup failed: %d\n", retval);
	printk("Pathlookup got %s (hello.txt)\n", nd->dentry->d_name.name);
	path_release(nd);

	printk("Try for an ELOOP\n");
	memset(nd, 0, sizeof(struct nameidata));
	retval = path_lookup("/symdir/up/symdir/up/symdir/up/symdir/up/hello.txt", 0, nd);
	if (retval)
		printk("Symlink lookup failed (it should): %d (-40)\n", retval);
	path_release(nd);
}
/* simple test - start one, do something else, and resume it. For lack of a
 * better infrastructure, we send ourselves a kmsg to run the kthread, which
 * we'll handle in smp_idle (which you may have to manually call). Note this
 * doesn't test things like memory being leaked, or dealing with processes. */
void test_kthreads(void)
{
	/* Kernel message to restart our kthread */
	void test_up_sem(struct trapframe *tf, uint32_t srcid, long a0, long a1,
	                 long a2)
	{
		struct semaphore *sem = (struct semaphore*)a0;
		struct kthread *kthread;

		printk("[kmsg] Upping the sem to start the kthread, stacktop is %08p\n",
		       get_stack_top());
		kthread = __up_sem(sem, FALSE);
		if (!kthread) {
			printk("[kmsg] Crap, the sem didn't have a kthread waiting!\n");
			return;
		}
		printk("[kmsg] Restarting the kthread...\n");
		restart_kthread(kthread);
		panic("[kmsg] Damnit...");
	}
	struct semaphore sem;
	init_sem(&sem, 1); /* set to 1 to test the unwind */
	printk("We're a kthread! Stacktop is %08p. Testing suspend, etc...\n",
	       get_stack_top());
	/* So we have something that will wake us up. Routine messages won't get
	 * serviced in the kernel right away. */
	send_kernel_message(core_id(), test_up_sem, (long)&sem, 0, 0,
	                    KMSG_ROUTINE);
	/* Actually block (or try to) */
	/* This one shouldn't block - but will test the unwind (if 1 above) */
	printk("About to sleep, but should unwind (signal beat us)\n");
	sleep_on(&sem);
	/* This one is for real, yo. Run and tell that. */
	printk("About to sleep for real\n");
	sleep_on(&sem);
	printk("Kthread restarted!, Stacktop is %08p.\n", get_stack_top());
}
/* Runs a simple test between core 0 (caller) and core 2 */
void test_kref(void)
{
	struct kref local_kref;
	bool done = FALSE;

	/* Second player's kmsg */
	void test_kref_2(struct trapframe *tf, uint32_t srcid, long a0, long a1,
	                 long a2)
	{
		struct kref *kref = (struct kref*)a0;
		bool *done = (bool*)a1;

		for (int i = 0; i < 10000000; i++) {
			kref_get(kref, 1);
			set_core_timer(1, TRUE);
			kref_put(kref);
		}
		*done = TRUE;
	}
	kref_init(&local_kref, fake_release, 1);
	send_kernel_message(2, test_kref_2, (long)&local_kref, (long)&done, 0,
	                    KMSG_ROUTINE);
	for (int i = 0; i < 10000000; i++) {
		kref_get(&local_kref, 1);
		set_core_timer(1, TRUE);
		kref_put(&local_kref);
	}
	while (!done)
		cpu_relax();
	assert(kref_refcnt(&local_kref) == 1);
	printk("[TEST-KREF] Simple 2-core getting/putting passed.\n");
}
void test_atomics(void)
{
	/* subtract_and_test */
	atomic_t num;
	/* Test subing to 0 */
	atomic_init(&num, 1);
	assert(atomic_sub_and_test(&num, 1) == 1);
	atomic_init(&num, 2);
	assert(atomic_sub_and_test(&num, 2) == 1);
	/* Test not getting to 0 */
	atomic_init(&num, 1);
	assert(atomic_sub_and_test(&num, 0) == 0);
	atomic_init(&num, 2);
	assert(atomic_sub_and_test(&num, 1) == 0);
	/* Test negatives */
	atomic_init(&num, -1);
	assert(atomic_sub_and_test(&num, 1) == 0);
	atomic_init(&num, -1);
	assert(atomic_sub_and_test(&num, -1) == 1);
	/* Test larger nums */
	atomic_init(&num, 265);
	assert(atomic_sub_and_test(&num, 265) == 1);
	atomic_init(&num, 265);
	assert(atomic_sub_and_test(&num, 2) == 0);

	/* Simple test, make sure the bool retval of CAS handles failure */
	void test_cas_val(long init_val)
	{
		atomic_t actual_num;
		long old_num;
		int attempt = 0;

		atomic_init(&actual_num, init_val);
		do {
			old_num = atomic_read(&actual_num);
			/* First time, try to fail */
			if (!attempt)
				old_num++;
			attempt++;
		} while (!atomic_cas(&actual_num, old_num, old_num + 10));
		if (atomic_read(&actual_num) != init_val + 10)
			printk("FUCK, CAS test failed for %d\n", init_val);
	}
	test_cas_val(257);
	test_cas_val(1);
}
/* x86 test, making sure our cpu_halt() and irq_handler() work. If you want to
 * see it fail, you'll probably need to put a nop in the asm for cpu_halt(), and
 * comment out abort_halt() in irq_handler(). */
void test_abort_halt(void)
{
#ifdef __i386__
	/* Core 1 does this, while core 0 hammers it with interrupts */
	void test_try_halt(struct trapframe *tf, uint32_t srcid, long a0, long a1,
	                   long a2)
	{
		/* wait 10 sec. should have a bunch of ints pending */
		udelay(10000000);
		printk("Core 1 is about to halt\n");
		cpu_halt();
		printk("Returned from halting on core 1\n");
	}
	send_kernel_message(1, test_try_halt, 0, 0, 0, KMSG_ROUTINE);
	/* wait 1 sec, enough time for core 1 to be in its KMSG */
	udelay(1000000);
	send_ipi(get_hw_coreid(0x01), I_TESTING);
	printk("Core 0 sent the IPI\n");
#endif /* __i386__ */
}
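/* Condition variable tests: a signal with no waiters, single signal and
 * broadcast wakeups, a storm of mixed waiters/signallers, and a timing sweep
 * between one sender and one receiver to probe the check-then-wait race. */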
void test_cv(void)
{
	static struct cond_var local_cv;
	static atomic_t counter;
	struct cond_var *cv = &local_cv;
	volatile bool state = FALSE; /* for test 3 */
	int nr_msgs;

	void __test_cv_signal(struct trapframe *tf, uint32_t srcid, long a0,
	                      long a1, long a2)
	{
		if (atomic_read(&counter) % 4)
			cv_signal(cv);
		else
			cv_broadcast(cv);
		atomic_dec(&counter);
	}
	void __test_cv_waiter(struct trapframe *tf, uint32_t srcid, long a0,
	                      long a1, long a2)
	{
		cv_lock(cv);
		/* check state, etc */
		cv_wait_and_unlock(cv);
		atomic_dec(&counter);
	}
	void __test_cv_waiter_t3(struct trapframe *tf, uint32_t srcid, long a0,
	                         long a1, long a2)
	{
		udelay(a0);
		/* if state == false, we haven't seen the signal yet */
		while (!state) {
			/* this way is a little more verbose, but avoids unnecessary locking */
			cv_lock(cv);
			/* first check is an optimization */
			if (state) {
				cv_unlock(cv);
				break;
			}
			cv_wait_and_unlock(cv);
		}
		/* this is the more traditional CV style */
		cv_lock(cv);
		while (!state)
			cv_wait(cv); /* unlocks and relocks */
		cv_unlock(cv);
		/* Make sure we are done, tell the controller we are done */
		atomic_dec(&counter);
		smp_idle(); /* kmsgs that might block cannot return! */
	}

	/* Test 0: signal without waiting */
	cv_broadcast(cv);
	cv_signal(cv);
	printk("test_cv: signal without waiting complete\n");

	/* Test 1: single / minimal shit */
	nr_msgs = num_cpus - 1; /* not using cpu 0 */
	atomic_init(&counter, nr_msgs);
	for (int i = 1; i < num_cpus; i++)
		send_kernel_message(i, __test_cv_waiter, 0, 0, 0, KMSG_ROUTINE);
	udelay(1000000);
	cv_signal(cv);
	while (atomic_read(&counter) != nr_msgs - 1)
		cpu_relax();
	printk("test_cv: single signal complete\n");
	cv_broadcast(cv);
	/* broadcast probably woke up the waiters on our core. since we want to
	 * spin on their completion, we need to yield for a bit. */
	kthread_yield();
	while (atomic_read(&counter))
		cpu_relax();
	printk("test_cv: broadcast signal complete\n");

	/* Test 2: shitloads of waiters and signalers */
	nr_msgs = 0x500; /* any more than 0x20000 could go OOM */
	atomic_init(&counter, nr_msgs);
	for (int i = 0; i < nr_msgs; i++) {
		int cpu = (i % (num_cpus - 1)) + 1;
		if (atomic_read(&counter) % 5)
			send_kernel_message(cpu, __test_cv_waiter, 0, 0, 0, KMSG_ROUTINE);
		else
			send_kernel_message(cpu, __test_cv_signal, 0, 0, 0, KMSG_ROUTINE);
	}
	kthread_yield(); /* run whatever messages we sent to ourselves */
	while (atomic_read(&counter)) {
		cv_broadcast(cv);
		kthread_yield(); /* run whatever messages we sent to ourselves */
	}
	assert(!cv->nr_waiters);
	printk("test_cv: massive message storm complete\n");

	/* Test 3: basic one signaller, one receiver. we want to vary the amount of
	 * time the sender and receiver delays, starting with (1ms, 0ms) and ending
	 * with (0ms, 1ms). At each extreme, such as with the sender waiting 1ms,
	 * the receiver/waiter should hit the "check and wait" point well before the
	 * sender/signaller hits the "change state and signal" point. */
	for (int i = 0; i < 1000; i++) {
		for (int j = 0; j < 10; j++) { /* some extra chances at each point */
			state = FALSE;
			atomic_init(&counter, 1); /* signal that the client is done */
			/* client waits for i usec */
			send_kernel_message(2, __test_cv_waiter_t3, i, 0, 0, KMSG_ROUTINE);
			udelay(1000 - i); /* senders wait time: 1000..0 */
			state = TRUE;
			cv_signal(cv);
			/* signal might have unblocked a kthread, let it run */
			kthread_yield();
			/* they might not have run at all yet (in which case they lost the
			 * race and don't need the signal). but we need to wait til they're
			 * done */
			while (atomic_read(&counter))
				cpu_relax();
			assert(!cv->nr_waiters);
		}
	}
	printk("test_cv: single sender/receiver complete\n");
}