#include <arch/mmu.h>
#include <arch/arch.h>
-#include <arch/bitmask.h>
+#include <bitmask.h>
#include <smp.h>
#include <ros/memlayout.h>
#include <ros/common.h>
#include <ros/bcq.h>
+#include <ros/ucq.h>
#include <atomic.h>
#include <stdio.h>
#include <string.h>
#include <testing.h>
#include <trap.h>
-#include <arch/trap.h>
#include <process.h>
#include <syscall.h>
-#include <timing.h>
+#include <time.h>
#include <kfs.h>
#include <multiboot.h>
#include <pmap.h>
#include <slab.h>
#include <kmalloc.h>
#include <hashtable.h>
+#include <radix.h>
+#include <monitor.h>
+#include <kthread.h>
+#include <schedule.h>
+#include <umem.h>
+#include <ucq.h>
+#include <setjmp.h>
+#include <apipe.h>
+#include <rwlock.h>
+#include <rendez.h>
#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)
-#ifdef __i386__
+#ifdef CONFIG_X86
void test_ipi_sending(void)
{
send_self_ipi(I_TESTING);
udelay(3000000);
cprintf("\nCORE 0 sending ipi to physical 1\n");
- send_ipi(get_hw_coreid(0x01), I_TESTING);
+ send_ipi(0x01, I_TESTING);
udelay(3000000);
cprintf("\nCORE 0 sending ipi to physical 2\n");
- send_ipi(get_hw_coreid(0x02), I_TESTING);
+ send_ipi(0x02, I_TESTING);
udelay(3000000);
cprintf("\nCORE 0 sending ipi to physical 3\n");
- send_ipi(get_hw_coreid(0x03), I_TESTING);
+ send_ipi(0x03, I_TESTING);
udelay(3000000);
cprintf("\nCORE 0 sending ipi to physical 15\n");
- send_ipi(get_hw_coreid(0x0f), I_TESTING);
+ send_ipi(0x0f, I_TESTING);
udelay(3000000);
cprintf("\nCORE 0 sending ipi to logical 2\n");
send_group_ipi(0x02, I_TESTING);
udelay(3000000);
}
-#endif // __i386__
+#endif // CONFIG_X86
void test_print_info(void)
cprintf("Contents of the page free list:\n");
for(int i=0; i<llc_cache->num_colors; i++) {
cprintf(" COLOR %d:\n", i);
- LIST_FOREACH(page, &colored_page_free_list[i], page_link) {
+ LIST_FOREACH(page, &colored_page_free_list[i], pg_link) {
cprintf(" Page: %d\n", page2ppn(page));
}
}
checklist_t *RO the_global_list;
-void test_checklist_handler(trapframe_t *tf, void* data)
+static void test_checklist_handler(struct hw_trapframe *hw_tf, void *data)
{
udelay(1000000);
cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
atomic_t a, b, c;
-#ifdef __IVY__
-void test_incrementer_handler(trapframe_t *tf, atomic_t *data)
-#else
-void test_incrementer_handler(trapframe_t *tf, void *data)
-#endif
+static void test_incrementer_handler(struct hw_trapframe *tf, void *data)
{
assert(data);
atomic_inc(data);
}
-void test_null_handler(trapframe_t *tf, void* data)
+static void test_null_handler(struct hw_trapframe *tf, void *data)
{
asm volatile("nop");
}
printk("Done\n");
}
-#ifdef __i386__
+#ifdef CONFIG_X86
void test_lapic_status_bit(void)
{
register_interrupt_handler(interrupt_handlers, I_TESTING,
atomic_set(&a,0);
printk("IPIs received (should be 0): %d\n", a);
for(int i = 0; i < NUM_IPI; i++) {
- send_ipi(get_hw_coreid(7), I_TESTING);
+ send_ipi(7, I_TESTING);
lapic_wait_to_send();
}
// need to wait a bit to let those IPIs get there
printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
// hopefully that handler never fires again. leaving it registered for now.
}
-#endif // __i386__
+#endif // CONFIG_X86
/************************************************************/
/* ISR Handler Functions */
-void test_hello_world_handler(trapframe_t *tf, void* data)
+void test_hello_world_handler(struct hw_trapframe *hw_tf, void *data)
{
int trapno;
- #if defined(__i386__)
- trapno = tf->tf_trapno;
- #elif defined(__sparc_v8__)
- trapno = (tf->tbr >> 4) & 0xFF;
+ #if defined(CONFIG_X86)
+ trapno = hw_tf->tf_trapno;
#else
trapno = 0;
#endif
- cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
- trapno, core_id(), tf);
+ cprintf("Incoming IRQ, ISR: %d on core %d with tf at %p\n",
+ trapno, core_id(), hw_tf);
}
-spinlock_t print_info_lock = SPINLOCK_INITIALIZER;
+spinlock_t print_info_lock = SPINLOCK_INITIALIZER_IRQSAVE;
-void test_print_info_handler(trapframe_t *tf, void* data)
+void test_print_info_handler(struct hw_trapframe *hw_tf, void *data)
{
uint64_t tsc = read_tsc();
cprintf("----------------------------\n");
cprintf("This is Core %d\n", core_id());
cprintf("Timestamp = %lld\n", tsc);
-#ifdef __i386__
+#ifdef CONFIG_X86
cprintf("Hardware core %d\n", hw_core_id());
cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
read_msr(0x20c), read_msr(0x20d));
cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
read_msr(0x20e), read_msr(0x20f));
-#endif // __i386__
+#endif // CONFIG_X86
cprintf("----------------------------\n");
spin_unlock_irqsave(&print_info_lock);
}
-void test_barrier_handler(trapframe_t *tf, void* data)
+void test_barrier_handler(struct hw_trapframe *hw_tf, void *data)
{
cprintf("Round 1: Core %d\n", core_id());
waiton_barrier(&test_cpu_array);
//cprintf("Round 4: Core %d\n", core_id());
}
-#ifdef __IVY__
-static void test_waiting_handler(trapframe_t *tf, atomic_t *data)
-#else
-static void test_waiting_handler(trapframe_t *tf, void *data)
-#endif
+static void test_waiting_handler(struct hw_trapframe *hw_tf, void *data)
{
atomic_dec(data);
}
-#ifdef __i386__
+#ifdef CONFIG_X86
void test_pit(void)
{
cprintf("Starting test for PIT now (10s)\n");
return;
}
-void test_km_handler(trapframe_t* tf, uint32_t srcid, void *a0, void *a1,
- void *a2)
+static void test_km_handler(uint32_t srcid, long a0, long a1, long a2)
{
- printk("Received KM on core %d from core %d: arg0= 0x%08x, arg1 = "
- "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);
+ printk("Received KM on core %d from core %d: arg0= %p, arg1 = %p, "
+ "arg2 = %p\n", core_id(), srcid, a0, a1, a2);
return;
}
 * precedence (the immediates should trump the others) */
printk("sending 5 IMMED to core 1, sending (#,deadbeef,0)\n");
for (int i = 0; i < 5; i++)
- send_kernel_message(1, test_km_handler, (void*)i, (void*)0xdeadbeef,
- (void*)0, KMSG_IMMEDIATE);
+ send_kernel_message(1, test_km_handler, (long)i, 0xdeadbeef, 0,
+ KMSG_IMMEDIATE);
udelay(5000000);
printk("sending 5 routine to core 1, sending (#,cafebabe,0)\n");
for (int i = 0; i < 5; i++)
- send_kernel_message(1, test_km_handler, (void*)i, (void*)0xcafebabe,
- (void*)0, KMSG_ROUTINE);
+ send_kernel_message(1, test_km_handler, (long)i, 0xcafebabe, 0,
+ KMSG_ROUTINE);
udelay(5000000);
printk("sending 10 routine and 3 immediate to core 2\n");
for (int i = 0; i < 10; i++)
- send_kernel_message(2, test_km_handler, (void*)i, (void*)0xcafebabe,
- (void*)0, KMSG_ROUTINE);
+ send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
+ KMSG_ROUTINE);
for (int i = 0; i < 3; i++)
- send_kernel_message(2, test_km_handler, (void*)i, (void*)0xdeadbeef,
- (void*)0, KMSG_IMMEDIATE);
+ send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
+ KMSG_IMMEDIATE);
udelay(5000000);
printk("sending 5 ea alternating to core 2\n");
for (int i = 0; i < 5; i++) {
- send_kernel_message(2, test_km_handler, (void*)i, (void*)0xdeadbeef,
- (void*)0, KMSG_IMMEDIATE);
- send_kernel_message(2, test_km_handler, (void*)i, (void*)0xcafebabe,
- (void*)0, KMSG_ROUTINE);
+ send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
+ KMSG_IMMEDIATE);
+ send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
+ KMSG_ROUTINE);
}
udelay(5000000);
return;
}
-#endif // __i386__
-
+#endif // CONFIG_X86
static void test_single_cache(int iters, size_t size, int align, int flags,
void (*ctor)(void *, size_t),
void (*dtor)(void *, size_t))
struct test tstruct[10];
struct hashtable *h;
- int k = 5;
+ uintptr_t k = 5;
struct test *v = &tstruct[0];
h = create_hashtable(32, __generic_hash, __generic_eq);
/* Ghetto test, only tests one prod or consumer at a time */
void test_bcq(void)
{
+ /* Tests a basic struct */
struct my_struct {
int x;
int y;
bcq_dequeue(&t_bcq, &out_struct, 16);
printk("out x %d. out y %d\n", out_struct.x, out_struct.y);
- DEFINE_BCQ_TYPES(my, int, 8);
+ /* Tests the BCQ a bit more, esp with overflow */
+ #define NR_ELEM_A_BCQ 8 /* NOTE: this must be a power of 2! */
+ DEFINE_BCQ_TYPES(my, int, NR_ELEM_A_BCQ);
struct my_bcq a_bcq;
- bcq_init(&a_bcq, int, 8);
+ bcq_init(&a_bcq, int, NR_ELEM_A_BCQ);
int y = 2;
int output[100];
int retval[100];
-
+
+ /* Helpful debugger */
+ void print_a_bcq(struct my_bcq *bcq)
+ {
+ printk("A BCQ (made of ints): %p\n", bcq);
+ printk("\tprod_idx: %p\n", bcq->hdr.prod_idx);
+ printk("\tcons_pub_idx: %p\n", bcq->hdr.cons_pub_idx);
+ printk("\tcons_pvt_idx: %p\n", bcq->hdr.cons_pvt_idx);
+ for (int i = 0; i < NR_ELEM_A_BCQ; i++) {
+ printk("Element %d, rdy_for_cons: %02p\n", i,
+ bcq->wraps[i].rdy_for_cons);
+ }
+ }
+
+ /* Put in more than it can take */
for (int i = 0; i < 15; i++) {
y = i;
- retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
+ retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
printk("enqueued: %d, had retval %d \n", y, retval[i]);
}
+ //print_a_bcq(&a_bcq);
+ /* Try to dequeue more than we put in */
for (int i = 0; i < 15; i++) {
- retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
+ retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
printk("dequeued: %d with retval %d\n", output[i], retval[i]);
}
+ //print_a_bcq(&a_bcq);
+ /* Put in some it should be able to take */
for (int i = 0; i < 3; i++) {
y = i;
- retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
+ retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
printk("enqueued: %d, had retval %d \n", y, retval[i]);
}
+ /* Take those, and then a couple extra */
for (int i = 0; i < 5; i++) {
- retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
+ retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
printk("dequeued: %d with retval %d\n", output[i], retval[i]);
}
+ /* Try some one-for-one */
for (int i = 0; i < 5; i++) {
y = i;
- retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
+ retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
printk("enqueued: %d, had retval %d \n", y, retval[i]);
- retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
+ retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
printk("dequeued: %d with retval %d\n", output[i], retval[i]);
}
-
+}
+
+/* Test a simple concurrent send and receive (one prod, one cons). We spawn a
+ * process that will go into _M mode on another core, and we'll do the test from
+ * an alarm handler run on our core. When we start up the process, we won't
+ * return so we need to defer the work with an alarm. */
+void test_ucq(void)
+{
+ struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
+ struct alarm_waiter *waiter = kmalloc(sizeof(struct alarm_waiter), 0);
+
+ /* Alarm handler: what we want to do after the process is up */
+ void send_msgs(struct alarm_waiter *waiter)
+ {
+ struct timer_chain *tchain;
+ struct proc *old_proc, *p = waiter->data;
+ struct ucq *ucq = (struct ucq*)USTACKTOP;
+ struct event_msg msg;
+
+ printk("Running the alarm handler!\n");
+ printk("NR msg per page: %d\n", NR_MSG_PER_PAGE);
+ /* might not be mmaped yet, if not, abort */
+ if (!user_mem_check(p, ucq, PGSIZE, 1, PTE_USER_RW)) {
+ printk("Not mmaped yet\n");
+ goto abort;
+ }
+ /* load their address space */
+ old_proc = switch_to(p);
+ /* So it's mmaped, see if it is ready (note that this is dangerous) */
+ if (!ucq->ucq_ready) {
+ printk("Not ready yet\n");
+ switch_back(p, old_proc);
+ goto abort;
+ }
+ /* So it's ready, time to finally do the tests... */
+ printk("[kernel] Finally starting the tests... \n");
+ /* 1: Send a simple message */
+ printk("[kernel] #1 Sending simple message (7, deadbeef)\n");
+ msg.ev_type = 7;
+ msg.ev_arg2 = 0xdeadbeef;
+ send_ucq_msg(ucq, p, &msg);
+ printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
+ /* 2: Send a bunch. In a VM, this causes one swap, and then a bunch of
+ * mmaps. */
+ printk("[kernel] #2 \n");
+ for (int i = 0; i < 5000; i++) {
+ msg.ev_type = i;
+ send_ucq_msg(ucq, p, &msg);
+ }
+ printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
+ printk("[kernel] #3 \n");
+ /* 3: make sure we chained pages (assuming 1k is enough) */
+ for (int i = 0; i < 1000; i++) {
+ msg.ev_type = i;
+ send_ucq_msg(ucq, p, &msg);
+ }
+ printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
+ /* other things we could do:
+ * - concurrent producers / consumers... ugh.
+ * - would require a kmsg to another core, instead of a local alarm
+ */
+ /* done, switch back and free things */
+ switch_back(p, old_proc);
+ proc_decref(p);
+ kfree(waiter); /* since it was kmalloc()d */
+ return;
+ abort:
+ tchain = &per_cpu_info[core_id()].tchain;
+ /* Set to run again */
+ set_awaiter_rel(waiter, 1000000);
+ set_alarm(tchain, waiter);
+ }
+ /* Set up a handler to run the real part of the test */
+ init_awaiter(waiter, send_msgs);
+ set_awaiter_rel(waiter, 1000000); /* 1s should be long enough */
+ set_alarm(tchain, waiter);
+ /* Just spawn the program */
+ struct file *program;
+ program = do_file_open("/bin/ucq", 0, 0);
+ if (!program) {
+ printk("Unable to find /bin/ucq!\n");
+ return;
+ }
+ char *p_envp[] = {"LD_LIBRARY_PATH=/lib", 0};
+ struct proc *p = proc_create(program, 0, p_envp);
+ proc_wakeup(p);
+ /* instead of getting rid of the reference created in proc_create, we'll put
+ * it in the awaiter */
+ waiter->data = p;
+ kref_put(&program->f_kref);
+	/* Should never return from schedule (env_pop in there).  Also note you may
+	 * not get the process you created, in the event there are others floating
+	 * around that are runnable */
+ schedule();
+ smp_idle();
+ assert(0);
}
/* rudimentary tests. does the basics, create, merge, split, etc. Feel free to
shrink_vmr(vmrs[2], 0x9000);
results[1].base = 0x8000;
results[1].end = 0x9000;
- check_vmrs(p, results, 2, n++);
+ check_vmrs(p, results, 2, n++); /* 10 */
if (vmrs[2] != find_vmr(p, 0x8500))
- printk("Failed to find the right vmr!");
+ printk("Failed to find the right vmr!\n");
if (vmrs[2] != find_first_vmr(p, 0x8500))
- printk("Failed to find the right vmr!");
+ printk("Failed to find the right vmr!\n");
if (vmrs[2] != find_first_vmr(p, 0x7500))
- printk("Failed to find the right vmr!");
+ printk("Failed to find the right vmr!\n");
if (find_first_vmr(p, 0x9500))
printk("Found a vmr when we shouldn't!\n");
/* grow up to another */
grow_vmr(vmrs[0], 0x8000);
results[0].end = 0x8000;
check_vmrs(p, results, 2, n++);
- vmrs[0]->vm_perm = 88;
- vmrs[2]->vm_perm = 77;
+ vmrs[0]->vm_prot = 88;
+ vmrs[2]->vm_prot = 77;
/* should be unmergeable due to perms */
if (-1 != merge_vmr(vmrs[0], vmrs[2]))
printk("Bad merge test failed\n");
check_vmrs(p, results, 2, n++);
/* should merge now */
- vmrs[2]->vm_perm = 88;
+ vmrs[2]->vm_prot = 88;
merge_vmr(vmrs[0], vmrs[2]);
results[0].end = 0x9000;
check_vmrs(p, results, 1, n++);
+ destroy_vmr(vmrs[0]);
+ check_vmrs(p, results, 0, n++);
+ /* Check the automerge function */
+ vmrs[0] = create_vmr(p, 0x2000, 0x1000);
+ vmrs[1] = create_vmr(p, 0x3000, 0x1000);
+ vmrs[2] = create_vmr(p, 0x4000, 0x1000);
+ for (int i = 0; i < 3; i++) {
+ vmrs[i]->vm_prot = PROT_READ;
+ vmrs[i]->vm_flags = 0;
+ vmrs[i]->vm_file = 0; /* would like to test this, it's a pain for now */
+ }
+ vmrs[0] = merge_me(vmrs[1]);
+ results[0].base = 0x2000;
+ results[0].end = 0x5000;
+ check_vmrs(p, results, 1, n++);
+ destroy_vmr(vmrs[0]);
+ check_vmrs(p, results, 0, n++);
+ /* Check unfixed creation requests */
+ vmrs[0] = create_vmr(p, 0x0000, 0x1000);
+ vmrs[1] = create_vmr(p, 0x0000, 0x1000);
+ vmrs[2] = create_vmr(p, 0x0000, 0x1000);
+ results[0].base = 0x0000;
+ results[0].end = 0x1000;
+ results[1].base = 0x1000;
+ results[1].end = 0x2000;
+ results[2].base = 0x2000;
+ results[2].end = 0x3000;
+ check_vmrs(p, results, 3, n++);
printk("Finished vm_regions test!\n");
}
+
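+/* Walks the radix tree through insert/lookup/delete, including the two-tier
+ * (slot 65) and three-tier (slot 4096) cases the printks below refer to. */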
+void test_radix_tree(void)
+{
+ struct radix_tree real_tree = RADIX_INITIALIZER;
+ struct radix_tree *tree = &real_tree;
+ void *retval;
+
+ if (radix_insert(tree, 0, (void*)0xdeadbeef))
+ printk("Failed to insert at 0!\n");
+ radix_delete(tree, 0);
+ if (radix_insert(tree, 0, (void*)0xdeadbeef))
+ printk("Failed to re-insert at 0!\n");
+
+ if (radix_insert(tree, 3, (void*)0xdeadbeef))
+ printk("Failed to insert first!\n");
+ radix_insert(tree, 4, (void*)0x04040404);
+ assert((void*)0xdeadbeef == radix_lookup(tree, 3));
+ for (int i = 5; i < 100; i++)
+ if ((retval = radix_lookup(tree, i))) {
+ printk("Extra item %p at slot %d in tree %p\n", retval, i,
+ tree);
+ print_radix_tree(tree);
+ monitor(0);
+ }
+ if (radix_insert(tree, 65, (void*)0xcafebabe))
+ printk("Failed to insert a two-tier!\n");
+ if (!radix_insert(tree, 4, (void*)0x03030303))
+ printk("Should not let us reinsert\n");
+ if (radix_insert(tree, 4095, (void*)0x4095))
+ printk("Failed to insert a two-tier boundary!\n");
+ if (radix_insert(tree, 4096, (void*)0x4096))
+ printk("Failed to insert a three-tier!\n");
+ //print_radix_tree(tree);
+ radix_delete(tree, 65);
+ radix_delete(tree, 3);
+ radix_delete(tree, 4);
+ radix_delete(tree, 4095);
+ radix_delete(tree, 4096);
+ //print_radix_tree(tree);
+ printk("Finished radix tree tests!\n");
+}
+
+/* Assorted FS tests, which were hanging around in init.c */
+void test_random_fs(void)
+{
+ int retval = do_symlink("/dir1/sym", "/bin/hello", S_IRWXU);
+ if (retval)
+ printk("symlink1 creation failed\n");
+ retval = do_symlink("/symdir", "/dir1/dir1-1", S_IRWXU);
+ if (retval)
+		printk("symlink2 creation failed\n");
+ retval = do_symlink("/dir1/test.txt", "/dir2/test2.txt", S_IRWXU);
+ if (retval)
+		printk("symlink3 creation failed\n");
+ retval = do_symlink("/dir1/dir1-1/up", "../../", S_IRWXU);
+ if (retval)
+		printk("symlink4 creation failed\n");
+ retval = do_symlink("/bin/hello-sym", "hello", S_IRWXU);
+ if (retval)
+		printk("symlink5 creation failed\n");
+
+ struct dentry *dentry;
+ struct nameidata nd_r = {0}, *nd = &nd_r;
+ retval = path_lookup("/dir1/sym", 0, nd);
+ if (retval)
+ printk("symlink lookup failed: %d\n", retval);
+ char *symname = nd->dentry->d_inode->i_op->readlink(nd->dentry);
+ printk("Pathlookup got %s (sym)\n", nd->dentry->d_name.name);
+ if (!symname)
+ printk("symlink reading failed\n");
+ else
+ printk("Symname: %s (/bin/hello)\n", symname);
+ path_release(nd);
+ /* try with follow */
+ memset(nd, 0, sizeof(struct nameidata));
+ retval = path_lookup("/dir1/sym", LOOKUP_FOLLOW, nd);
+ if (retval)
+ printk("symlink lookup failed: %d\n", retval);
+ printk("Pathlookup got %s (hello)\n", nd->dentry->d_name.name);
+ path_release(nd);
+
+ /* try with a directory */
+ memset(nd, 0, sizeof(struct nameidata));
+ retval = path_lookup("/symdir/f1-1.txt", 0, nd);
+ if (retval)
+ printk("symlink lookup failed: %d\n", retval);
+ printk("Pathlookup got %s (f1-1.txt)\n", nd->dentry->d_name.name);
+ path_release(nd);
+
+ /* try with a rel path */
+ printk("Try with a rel path\n");
+ memset(nd, 0, sizeof(struct nameidata));
+ retval = path_lookup("/symdir/up/hello.txt", 0, nd);
+ if (retval)
+ printk("symlink lookup failed: %d\n", retval);
+ printk("Pathlookup got %s (hello.txt)\n", nd->dentry->d_name.name);
+ path_release(nd);
+
+ printk("Try for an ELOOP\n");
+ memset(nd, 0, sizeof(struct nameidata));
+ retval = path_lookup("/symdir/up/symdir/up/symdir/up/symdir/up/hello.txt", 0, nd);
+ if (retval)
+ printk("Symlink lookup failed (it should): %d (-40)\n", retval);
+ path_release(nd);
+}
+
+/* Kernel message to restart our kthread */
+static void __test_up_sem(uint32_t srcid, long a0, long a1, long a2)
+{
+ struct semaphore *sem = (struct semaphore*)a0;
+ printk("[kmsg] Upping the sem to start the kthread, stacktop is %p\n",
+ get_stack_top());
+ if (!sem_up(sem)) {
+ printk("[kmsg] Crap, the sem didn't have a kthread waiting!\n");
+ return;
+ }
+ printk("Kthread will restart when we handle the __launch RKM\n");
+}
+
+/* simple test - start one, do something else, and resume it. For lack of a
+ * better infrastructure, we send ourselves a kmsg to run the kthread, which
+ * we'll handle in smp_idle (which you may have to manually call). Note this
+ * doesn't test things like memory being leaked, or dealing with processes. */
+void test_kthreads(void)
+{
+ struct semaphore sem;
+ sem_init(&sem, 1); /* set to 1 to test the unwind */
+ printk("We're a kthread! Stacktop is %p. Testing suspend, etc...\n",
+ get_stack_top());
+ /* So we have something that will wake us up. Routine messages won't get
+ * serviced in the kernel right away. */
+ send_kernel_message(core_id(), __test_up_sem, (long)&sem, 0, 0,
+ KMSG_ROUTINE);
+ /* Actually block (or try to) */
+ /* This one shouldn't block - but will test the unwind (if 1 above) */
+ printk("About to sleep, but should unwind (signal beat us)\n");
+ sem_down(&sem);
+ /* This one is for real, yo. Run and tell that. */
+ printk("About to sleep for real\n");
+ sem_down(&sem);
+ printk("Kthread restarted!, Stacktop is %p.\n", get_stack_top());
+}
+
+/* Second player's kmsg */
+static void __test_kref_2(uint32_t srcid, long a0, long a1, long a2)
+{
+ struct kref *kref = (struct kref*)a0;
+ bool *done = (bool*)a1;
+ enable_irq();
+ for (int i = 0; i < 10000000; i++) {
+ kref_get(kref, 1);
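+		/* the short timer and udelay are presumably here so IRQs land between
+		 * the get and the put */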
+ set_core_timer(1, TRUE);
+ udelay(2);
+ kref_put(kref);
+ }
+ *done = TRUE;
+}
+
+/* Runs a simple test between core 0 (caller) and core 2 */
+void test_kref(void)
+{
+ struct kref local_kref;
+ bool done = FALSE;
+
+ kref_init(&local_kref, fake_release, 1);
+ send_kernel_message(2, __test_kref_2, (long)&local_kref, (long)&done, 0,
+ KMSG_ROUTINE);
+ for (int i = 0; i < 10000000; i++) {
+ kref_get(&local_kref, 1);
+ udelay(2);
+ kref_put(&local_kref);
+ }
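+	/* wait for core 2's kmsg to finish before checking the final refcount */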
+ while (!done)
+ cpu_relax();
+ assert(kref_refcnt(&local_kref) == 1);
+ printk("[TEST-KREF] Simple 2-core getting/putting passed.\n");
+}
+
+void test_atomics(void)
+{
+ /* subtract_and_test */
+ atomic_t num;
+ /* Test subing to 0 */
+ atomic_init(&num, 1);
+ assert(atomic_sub_and_test(&num, 1) == 1);
+ atomic_init(&num, 2);
+ assert(atomic_sub_and_test(&num, 2) == 1);
+ /* Test not getting to 0 */
+ atomic_init(&num, 1);
+ assert(atomic_sub_and_test(&num, 0) == 0);
+ atomic_init(&num, 2);
+ assert(atomic_sub_and_test(&num, 1) == 0);
+ /* Test negatives */
+ atomic_init(&num, -1);
+ assert(atomic_sub_and_test(&num, 1) == 0);
+ atomic_init(&num, -1);
+ assert(atomic_sub_and_test(&num, -1) == 1);
+ /* Test larger nums */
+ atomic_init(&num, 265);
+ assert(atomic_sub_and_test(&num, 265) == 1);
+ atomic_init(&num, 265);
+ assert(atomic_sub_and_test(&num, 2) == 0);
+
+ /* CAS */
+ /* Simple test, make sure the bool retval of CAS handles failure */
+ void test_cas_val(long init_val)
+ {
+ atomic_t actual_num;
+ long old_num;
+ int attempt;
+ atomic_init(&actual_num, init_val);
+ attempt = 0;
+ do {
+ old_num = atomic_read(&actual_num);
+ /* First time, try to fail */
+ if (attempt == 0)
+ old_num++;
+ attempt++;
+ } while (!atomic_cas(&actual_num, old_num, old_num + 10));
+ if (atomic_read(&actual_num) != init_val + 10)
+			printk("FUCK, CAS test failed for %ld\n", init_val);
+ }
+ test_cas_val(257);
+ test_cas_val(1);
+}
+
+/* Helper KMSG for test_abort. Core 1 does this, while core 0 sends an IRQ. */
+static void __test_try_halt(uint32_t srcid, long a0, long a1, long a2)
+{
+ disable_irq();
+ /* wait 10 sec. should have a bunch of ints pending */
+ udelay(10000000);
+ printk("Core 1 is about to halt\n");
+ cpu_halt();
+ printk("Returned from halting on core 1\n");
+}
+
+/* x86 test, making sure our cpu_halt() and irq_handler() work. If you want to
+ * see it fail, you'll probably need to put a nop in the asm for cpu_halt(), and
+ * comment out abort_halt() in irq_handler(). */
+void test_abort_halt(void)
+{
+#ifdef CONFIG_X86
+ send_kernel_message(1, __test_try_halt, 0, 0, 0, KMSG_ROUTINE);
+ /* wait 1 sec, enough time to for core 1 to be in its KMSG */
+ udelay(1000000);
+ /* Send an IPI */
+ send_ipi(0x01, I_TESTING);
+ printk("Core 0 sent the IPI\n");
+#endif /* CONFIG_X86 */
+}
+
+/* Funcs and global vars for test_cv() */
+static struct cond_var local_cv;
+static atomic_t counter;
+static struct cond_var *cv = &local_cv;
+static volatile bool state = FALSE; /* for test 3 */
+
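+/* Signaller kmsg: broadcasts when the shared counter is a multiple of 4,
+ * signals otherwise */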
+void __test_cv_signal(uint32_t srcid, long a0, long a1, long a2)
+{
+ if (atomic_read(&counter) % 4)
+ cv_signal(cv);
+ else
+ cv_broadcast(cv);
+ atomic_dec(&counter);
+}
+
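+/* Waiter kmsg: blocks on the CV until signalled/broadcast, then ticks the
+ * counter down */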
+void __test_cv_waiter(uint32_t srcid, long a0, long a1, long a2)
+{
+ cv_lock(cv);
+ /* check state, etc */
+ cv_wait_and_unlock(cv);
+ atomic_dec(&counter);
+}
+
+void __test_cv_waiter_t3(uint32_t srcid, long a0, long a1, long a2)
+{
+ udelay(a0);
+ /* if state == false, we haven't seen the signal yet */
+ cv_lock(cv);
+ while (!state) {
+ cpu_relax();
+ cv_wait(cv); /* unlocks and relocks */
+ }
+ cv_unlock(cv);
+ /* Make sure we are done, tell the controller we are done */
+ cmb();
+ assert(state);
+ atomic_dec(&counter);
+}
+
+void test_cv(void)
+{
+ int nr_msgs;
+
+ cv_init(cv);
+ /* Test 0: signal without waiting */
+ cv_broadcast(cv);
+ cv_signal(cv);
+ kthread_yield();
+ printk("test_cv: signal without waiting complete\n");
+
+ /* Test 1: single / minimal shit */
+ nr_msgs = num_cpus - 1; /* not using cpu 0 */
+ atomic_init(&counter, nr_msgs);
+ for (int i = 1; i < num_cpus; i++)
+ send_kernel_message(i, __test_cv_waiter, 0, 0, 0, KMSG_ROUTINE);
+ udelay(1000000);
+ cv_signal(cv);
+ kthread_yield();
+ while (atomic_read(&counter) != nr_msgs - 1)
+ cpu_relax();
+ printk("test_cv: single signal complete\n");
+ cv_broadcast(cv);
+ /* broadcast probably woke up the waiters on our core. since we want to
+ * spin on their completion, we need to yield for a bit. */
+ kthread_yield();
+ while (atomic_read(&counter))
+ cpu_relax();
+ printk("test_cv: broadcast signal complete\n");
+
+ /* Test 2: shitloads of waiters and signalers */
+ nr_msgs = 0x500; /* any more than 0x20000 could go OOM */
+ atomic_init(&counter, nr_msgs);
+ for (int i = 0; i < nr_msgs; i++) {
+ int cpu = (i % (num_cpus - 1)) + 1;
+ if (atomic_read(&counter) % 5)
+ send_kernel_message(cpu, __test_cv_waiter, 0, 0, 0, KMSG_ROUTINE);
+ else
+ send_kernel_message(cpu, __test_cv_signal, 0, 0, 0, KMSG_ROUTINE);
+ }
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ while (atomic_read(&counter)) {
+ cpu_relax();
+ cv_broadcast(cv);
+ udelay(1000000);
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ }
+ assert(!cv->nr_waiters);
+ printk("test_cv: massive message storm complete\n");
+
+ /* Test 3: basic one signaller, one receiver. we want to vary the amount of
+ * time the sender and receiver delays, starting with (1ms, 0ms) and ending
+ * with (0ms, 1ms). At each extreme, such as with the sender waiting 1ms,
+ * the receiver/waiter should hit the "check and wait" point well before the
+ * sender/signaller hits the "change state and signal" point. */
+ for (int i = 0; i < 1000; i++) {
+ for (int j = 0; j < 10; j++) { /* some extra chances at each point */
+ state = FALSE;
+ atomic_init(&counter, 1); /* signal that the client is done */
+ /* client waits for i usec */
+ send_kernel_message(2, __test_cv_waiter_t3, i, 0, 0, KMSG_ROUTINE);
+ cmb();
+ udelay(1000 - i); /* senders wait time: 1000..0 */
+ state = TRUE;
+ cv_signal(cv);
+ /* signal might have unblocked a kthread, let it run */
+ kthread_yield();
+ /* they might not have run at all yet (in which case they lost the
+ * race and don't need the signal). but we need to wait til they're
+ * done */
+ while (atomic_read(&counter))
+ cpu_relax();
+ assert(!cv->nr_waiters);
+ }
+ }
+ printk("test_cv: single sender/receiver complete\n");
+}
+
+/* Based on a bug I noticed. TODO: actual memset test... */
+void test_memset(void)
+{
+ #define ARR_SZ 256
+
+ void print_array(char *c, size_t len)
+ {
+ for (int i = 0; i < len; i++)
+ printk("%04d: %02x\n", i, *c++);
+ }
+
+ void check_array(char *c, char x, size_t len)
+ {
+ for (int i = 0; i < len; i++) {
+ if (*c != x) {
+ printk("Char %d is %c (%02x), should be %c (%02x)\n", i, *c,
+ *c, x, x);
+ break;
+ }
+ c++;
+ }
+ }
+
+ void run_check(char *arr, int ch, size_t len)
+ {
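+		/* Zero the array, memset all but the last 4 bytes, then verify both the
+		 * set region and the untouched tail */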
+ char *c = arr;
+ for (int i = 0; i < ARR_SZ; i++)
+ *c++ = 0x0;
+ memset(arr, ch, len - 4);
+ check_array(arr, ch, len - 4);
+ check_array(arr + len - 4, 0x0, 4);
+ }
+
+ char bytes[ARR_SZ];
+ run_check(bytes, 0xfe, 20);
+ run_check(bytes, 0xc0fe, 20);
+ printk("Done!\n");
+}
+
+void __attribute__((noinline)) __longjmp_wrapper(struct jmpbuf* jb)
+{
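+	/* The empty asm, together with noinline, should keep the compiler from
+	 * optimizing this call away, so longjmp() has a real frame to return across */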
+ asm ("");
+ printk("Starting: %s\n", __FUNCTION__);
+ longjmp(jb, 1);
+ // Should never get here
+ printk("Exiting: %s\n", __FUNCTION__);
+}
+
+void test_setjmp()
+{
+ struct jmpbuf jb;
+ printk("Starting: %s\n", __FUNCTION__);
+ if (setjmp(&jb)) {
+ printk("After second setjmp return: %s\n", __FUNCTION__);
+ }
+ else {
+ printk("After first setjmp return: %s\n", __FUNCTION__);
+ __longjmp_wrapper(&jb);
+ }
+ printk("Exiting: %s\n", __FUNCTION__);
+}
+
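+/* One-writer / one-reader exercise of the atomic pipe: the writer runs as a
+ * kmsg (first on core 0, then on core 1) while this kthread plays the reader. */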
+void test_apipe(void)
+{
+ static struct atomic_pipe test_pipe;
+
+ struct some_struct {
+ long x;
+ int y;
+ };
+ /* Don't go too big, or you'll run off the stack */
+ #define MAX_BATCH 100
+
+ void __test_apipe_writer(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int ret, count_todo;
+ int total = 0;
+ struct some_struct local_str[MAX_BATCH];
+ for (int i = 0; i < MAX_BATCH; i++) {
+ local_str[i].x = 0xf00;
+ local_str[i].y = 0xba5;
+ }
+		/* testing 0, and max out at MAX_BATCH. [0, ... MAX_BATCH] */
+ for (int i = 0; i < MAX_BATCH + 1; i++) {
+ count_todo = i;
+ while (count_todo) {
+ ret = apipe_write(&test_pipe, &local_str, count_todo);
+ /* Shouldn't break, based on the loop counters */
+ if (!ret) {
+ printk("Writer breaking with %d left\n", count_todo);
+ break;
+ }
+ total += ret;
+ count_todo -= ret;
+ }
+ }
+ printk("Writer done, added %d elems\n", total);
+ apipe_close_writer(&test_pipe);
+ }
+
+ void __test_apipe_reader(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int ret, count_todo;
+ int total = 0;
+ struct some_struct local_str[MAX_BATCH] = {{0}};
+		/* reversed loop compared to the writer [MAX_BATCH, ... 0] */
+ for (int i = MAX_BATCH; i >= 0; i--) {
+ count_todo = i;
+ while (count_todo) {
+ ret = apipe_read(&test_pipe, &local_str, count_todo);
+ if (!ret) {
+ printk("Reader breaking with %d left\n", count_todo);
+ break;
+ }
+ total += ret;
+ count_todo -= ret;
+ }
+ }
+ printk("Reader done, took %d elems\n", total);
+ for (int i = 0; i < MAX_BATCH; i++) {
+ assert(local_str[i].x == 0xf00);
+ assert(local_str[i].y == 0xba5);
+ }
+ apipe_close_reader(&test_pipe);
+ }
+
+ void *pipe_buf = kpage_alloc_addr();
+ assert(pipe_buf);
+ apipe_init(&test_pipe, pipe_buf, PGSIZE, sizeof(struct some_struct));
+ printd("*ap_buf %p\n", test_pipe.ap_buf);
+ printd("ap_ring_sz %p\n", test_pipe.ap_ring_sz);
+ printd("ap_elem_sz %p\n", test_pipe.ap_elem_sz);
+ printd("ap_rd_off %p\n", test_pipe.ap_rd_off);
+ printd("ap_wr_off %p\n", test_pipe.ap_wr_off);
+ printd("ap_nr_readers %p\n", test_pipe.ap_nr_readers);
+ printd("ap_nr_writers %p\n", test_pipe.ap_nr_writers);
+ send_kernel_message(0, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
+ /* Once we start synchronizing with a kmsg / kthread that could be on a
+ * different core, we run the chance of being migrated when we block. */
+ __test_apipe_reader(0, 0, 0, 0);
+ /* Wait til the first test is done */
+ while (test_pipe.ap_nr_writers) {
+ kthread_yield();
+ cpu_relax();
+ }
+ /* Try cross core (though CV wake ups schedule on the waking core) */
+ apipe_open_reader(&test_pipe);
+ apipe_open_writer(&test_pipe);
+ send_kernel_message(1, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
+ __test_apipe_reader(0, 0, 0, 0);
+ /* We could be on core 1 now. If we were called from core0, our caller
+ * might expect us to return while being on core 0 (like if we were kfunc'd
+	 * from the monitor). Be careful if you copy this code. */
+}
+
+static struct rwlock rwlock, *rwl = &rwlock;
+static atomic_t rwlock_counter;
+void test_rwlock(void)
+{
+ bool ret;
+ rwinit(rwl);
+ /* Basic: can i lock twice, recursively? */
+ rlock(rwl);
+ ret = canrlock(rwl);
+ assert(ret);
+ runlock(rwl);
+ runlock(rwl);
+	/* Other simple tests */
+ wlock(rwl);
+ wunlock(rwl);
+
+ /* Just some half-assed different operations */
+ void __test_rwlock(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int rand = read_tsc() & 0xff;
+ for (int i = 0; i < 10000; i++) {
+ switch ((rand * i) % 5) {
+ case 0:
+ case 1:
+ rlock(rwl);
+ runlock(rwl);
+ break;
+ case 2:
+ case 3:
+ if (canrlock(rwl))
+ runlock(rwl);
+ break;
+ case 4:
+ wlock(rwl);
+ wunlock(rwl);
+ break;
+ }
+ }
+ /* signal to allow core 0 to finish */
+ atomic_dec(&rwlock_counter);
+ }
+
+ /* send 4 messages to each non core 0 */
+ atomic_init(&rwlock_counter, (num_cpus - 1) * 4);
+ for (int i = 1; i < num_cpus; i++)
+ for (int j = 0; j < 4; j++)
+ send_kernel_message(i, __test_rwlock, 0, 0, 0, KMSG_ROUTINE);
+ while (atomic_read(&rwlock_counter))
+ cpu_relax();
+ printk("rwlock test complete\n");
+}
+
+/* Funcs and global vars for test_rv() */
+static struct rendez local_rv;
+static struct rendez *rv = &local_rv;
+/* reusing state and counter from test_cv... */
+
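+/* Condition for rendez_sleep(): sleepers can wake for good once 'state' is TRUE */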
+static int __rendez_cond(void *arg)
+{
+ return *(bool*)arg;
+}
+
+void __test_rv_wakeup(uint32_t srcid, long a0, long a1, long a2)
+{
+ if (atomic_read(&counter) % 4)
+ cv_signal(cv);
+ else
+ cv_broadcast(cv);
+ atomic_dec(&counter);
+}
+
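+/* Sleeper kmsg: blocks in rendez_sleep() on the condition above, then ticks
+ * the counter down */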
+void __test_rv_sleeper(uint32_t srcid, long a0, long a1, long a2)
+{
+ rendez_sleep(rv, __rendez_cond, (void*)&state);
+ atomic_dec(&counter);
+}
+
+void __test_rv_sleeper_timeout(uint32_t srcid, long a0, long a1, long a2)
+{
+ /* half-assed amount of time. */
+ rendez_sleep_timeout(rv, __rendez_cond, (void*)&state, a0);
+ atomic_dec(&counter);
+}
+
+void test_rv(void)
+{
+ int nr_msgs;
+
+ rendez_init(rv);
+ /* Test 0: signal without waiting */
+ rendez_wakeup(rv);
+ kthread_yield();
+ printk("test_rv: wakeup without sleeping complete\n");
+
+ /* Test 1: a few sleepers */
+ nr_msgs = num_cpus - 1; /* not using cpu 0 */
+ atomic_init(&counter, nr_msgs);
+ state = FALSE;
+ for (int i = 1; i < num_cpus; i++)
+ send_kernel_message(i, __test_rv_sleeper, 0, 0, 0, KMSG_ROUTINE);
+ udelay(1000000);
+ cmb();
+ state = TRUE;
+ rendez_wakeup(rv);
+ /* broadcast probably woke up the waiters on our core. since we want to
+ * spin on their completion, we need to yield for a bit. */
+ kthread_yield();
+ while (atomic_read(&counter))
+ cpu_relax();
+ printk("test_rv: bulk wakeup complete\n");
+
+ /* Test 2: different types of sleepers / timeouts */
+ state = FALSE;
+ nr_msgs = 0x500; /* any more than 0x20000 could go OOM */
+ atomic_init(&counter, nr_msgs);
+ for (int i = 0; i < nr_msgs; i++) {
+ int cpu = (i % (num_cpus - 1)) + 1;
+		/* timeouts from 0ms..5000ms (enough that they should wake via cond) */
+ if (atomic_read(&counter) % 5)
+ send_kernel_message(cpu, __test_rv_sleeper_timeout, i * 4, 0, 0,
+ KMSG_ROUTINE);
+ else
+ send_kernel_message(cpu, __test_rv_sleeper, 0, 0, 0, KMSG_ROUTINE);
+ }
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ state = TRUE;
+ while (atomic_read(&counter)) {
+ cpu_relax();
+ rendez_wakeup(rv);
+ udelay(1000000);
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ }
+ assert(!rv->cv.nr_waiters);
+ printk("test_rv: lots of sleepers/timeouts complete\n");
+}