#include <umem.h>
#include <ucq.h>
#include <setjmp.h>
+#include <apipe.h>
+#include <rwlock.h>
+#include <rendez.h>
#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
}
/* Funcs and global vars for test_cv() */
-struct cond_var local_cv;
-atomic_t counter;
-struct cond_var *cv = &local_cv;
-volatile bool state = FALSE; /* for test 3 */
+static struct cond_var local_cv;
+static atomic_t counter;
+static struct cond_var *cv = &local_cv;
+static volatile bool state = FALSE; /* for test 3 */
void __test_cv_signal(uint32_t srcid, long a0, long a1, long a2)
{
printk("Exiting: %s\n", __FUNCTION__);
}
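+/* Exercises the atomic pipe (apipe): a writer pushes batches of fixed-pattern
+ * structs into the pipe while a reader drains and verifies them, first on the
+ * same core and then cross-core. */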
+void test_apipe(void)
+{
+ static struct atomic_pipe test_pipe;
+
+ struct some_struct {
+ long x;
+ int y;
+ };
+ /* Don't go too big, or you'll run off the stack */
+ #define MAX_BATCH 100
+
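+	/* Writer: pushes batches of increasing size, [0 .. MAX_BATCH] elements per
+	 * pass, blocking whenever the pipe fills up. */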
+ void __test_apipe_writer(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int ret, count_todo;
+ int total = 0;
+ struct some_struct local_str[MAX_BATCH];
+ for (int i = 0; i < MAX_BATCH; i++) {
+ local_str[i].x = 0xf00;
+ local_str[i].y = 0xba5;
+ }
+		/* testing 0, and maxing out at MAX_BATCH: [0, ... MAX_BATCH] */
+ for (int i = 0; i < MAX_BATCH + 1; i++) {
+ count_todo = i;
+ while (count_todo) {
+ ret = apipe_write(&test_pipe, &local_str, count_todo);
+ /* Shouldn't break, based on the loop counters */
+ if (!ret) {
+ printk("Writer breaking with %d left\n", count_todo);
+ break;
+ }
+ total += ret;
+ count_todo -= ret;
+ }
+ }
+ printk("Writer done, added %d elems\n", total);
+ apipe_close_writer(&test_pipe);
+ }
+
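+	/* Reader: pulls batches of decreasing size, [MAX_BATCH .. 0] elements per
+	 * pass, then checks that the writer's pattern came through intact. */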
+ void __test_apipe_reader(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int ret, count_todo;
+ int total = 0;
+ struct some_struct local_str[MAX_BATCH] = {{0}};
+		/* reversed loop compared to the writer: [MAX_BATCH, ... 0] */
+ for (int i = MAX_BATCH; i >= 0; i--) {
+ count_todo = i;
+ while (count_todo) {
+ ret = apipe_read(&test_pipe, &local_str, count_todo);
+ if (!ret) {
+ printk("Reader breaking with %d left\n", count_todo);
+ break;
+ }
+ total += ret;
+ count_todo -= ret;
+ }
+ }
+ printk("Reader done, took %d elems\n", total);
+ for (int i = 0; i < MAX_BATCH; i++) {
+ assert(local_str[i].x == 0xf00);
+ assert(local_str[i].y == 0xba5);
+ }
+ apipe_close_reader(&test_pipe);
+ }
+
+ void *pipe_buf = kpage_alloc_addr();
+ assert(pipe_buf);
+ apipe_init(&test_pipe, pipe_buf, PGSIZE, sizeof(struct some_struct));
+ printd("*ap_buf %p\n", test_pipe.ap_buf);
+ printd("ap_ring_sz %p\n", test_pipe.ap_ring_sz);
+ printd("ap_elem_sz %p\n", test_pipe.ap_elem_sz);
+ printd("ap_rd_off %p\n", test_pipe.ap_rd_off);
+ printd("ap_wr_off %p\n", test_pipe.ap_wr_off);
+ printd("ap_nr_readers %p\n", test_pipe.ap_nr_readers);
+ printd("ap_nr_writers %p\n", test_pipe.ap_nr_writers);
+ send_kernel_message(0, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
+ /* Once we start synchronizing with a kmsg / kthread that could be on a
+ * different core, we run the chance of being migrated when we block. */
+ __test_apipe_reader(0, 0, 0, 0);
+	/* Wait until the first test is done */
+ while (test_pipe.ap_nr_writers) {
+ kthread_yield();
+ cpu_relax();
+ }
+ /* Try cross core (though CV wake ups schedule on the waking core) */
+ apipe_open_reader(&test_pipe);
+ apipe_open_writer(&test_pipe);
+ send_kernel_message(1, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
+ __test_apipe_reader(0, 0, 0, 0);
+	/* We could be on core 1 now.  If we were called from core 0, our caller
+	 * might expect us to return while still on core 0 (like if we were kfunc'd
+	 * from the monitor).  Be careful if you copy this code. */
+}
+
+static struct rwlock rwlock, *rwl = &rwlock;
+static atomic_t rwlock_counter;
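+/* Smoke-tests the rwlock: recursive read locks and a basic write lock on this
+ * core, then a mix of read / try-read / write operations hammered from every
+ * other core. */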
+void test_rwlock(void)
+{
+ bool ret;
+ rwinit(rwl);
+	/* Basic: can I lock twice, recursively? */
+ rlock(rwl);
+ ret = canrlock(rwl);
+ assert(ret);
+ runlock(rwl);
+ runlock(rwl);
+	/* Other simple tests */
+ wlock(rwl);
+ wunlock(rwl);
+
+	/* Just a half-assed mix of different operations */
+ void __test_rwlock(uint32_t srcid, long a0, long a1, long a2)
+ {
+ int rand = read_tsc() & 0xff;
+ for (int i = 0; i < 10000; i++) {
+ switch ((rand * i) % 5) {
+ case 0:
+ case 1:
+ rlock(rwl);
+ runlock(rwl);
+ break;
+ case 2:
+ case 3:
+ if (canrlock(rwl))
+ runlock(rwl);
+ break;
+ case 4:
+ wlock(rwl);
+ wunlock(rwl);
+ break;
+ }
+ }
+ /* signal to allow core 0 to finish */
+ atomic_dec(&rwlock_counter);
+ }
+
+	/* send 4 messages to each core other than core 0 */
+ atomic_init(&rwlock_counter, (num_cpus - 1) * 4);
+ for (int i = 1; i < num_cpus; i++)
+ for (int j = 0; j < 4; j++)
+ send_kernel_message(i, __test_rwlock, 0, 0, 0, KMSG_ROUTINE);
+ while (atomic_read(&rwlock_counter))
+ cpu_relax();
+ printk("rwlock test complete\n");
+}
+
+/* Funcs and global vars for test_rv() */
+static struct rendez local_rv;
+static struct rendez *rv = &local_rv;
+/* reusing state and counter from test_cv... */
+
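+/* Rendez condition function: returns nonzero once the sleep should end.  Here
+ * it just reads the shared 'state' flag that the test flips to TRUE. */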
+static int __rendez_cond(void *arg)
+{
+ return *(bool*)arg;
+}
+
+void __test_rv_wakeup(uint32_t srcid, long a0, long a1, long a2)
+{
+	/* wake whoever is sleeping on the rendez */
+	rendez_wakeup(rv);
+	atomic_dec(&counter);
+}
+
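+/* Sleeps on the rendez until __rendez_cond sees state == TRUE. */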
+void __test_rv_sleeper(uint32_t srcid, long a0, long a1, long a2)
+{
+ rendez_sleep(rv, __rendez_cond, (void*)&state);
+ atomic_dec(&counter);
+}
+
+void __test_rv_sleeper_timeout(uint32_t srcid, long a0, long a1, long a2)
+{
+	/* half-assed amount of time; the timeout comes in via a0. */
+ rendez_sleep_timeout(rv, __rendez_cond, (void*)&state, a0);
+ atomic_dec(&counter);
+}
+
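+/* Exercises the rendez: a wakeup with no sleepers, a bulk wakeup of one
+ * sleeper per remote core, then a flood of sleepers with and without
+ * timeouts. */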
+void test_rv(void)
+{
+ int nr_msgs;
+
+ rendez_init(rv);
+ /* Test 0: signal without waiting */
+ rendez_wakeup(rv);
+ kthread_yield();
+ printk("test_rv: wakeup without sleeping complete\n");
+
+ /* Test 1: a few sleepers */
+ nr_msgs = num_cpus - 1; /* not using cpu 0 */
+ atomic_init(&counter, nr_msgs);
+ state = FALSE;
+ for (int i = 1; i < num_cpus; i++)
+ send_kernel_message(i, __test_rv_sleeper, 0, 0, 0, KMSG_ROUTINE);
+ udelay(1000000);
+ cmb();
+ state = TRUE;
+ rendez_wakeup(rv);
+	/* The wakeup probably scheduled the waiters' kthreads on our core.  Since
+	 * we want to spin on their completion, we need to yield for a bit. */
+ kthread_yield();
+ while (atomic_read(&counter))
+ cpu_relax();
+ printk("test_rv: bulk wakeup complete\n");
+
+ /* Test 2: different types of sleepers / timeouts */
+ state = FALSE;
+ nr_msgs = 0x500; /* any more than 0x20000 could go OOM */
+ atomic_init(&counter, nr_msgs);
+ for (int i = 0; i < nr_msgs; i++) {
+ int cpu = (i % (num_cpus - 1)) + 1;
+		/* timeouts from 0ms .. ~5000ms (enough that they should wake via the
+		 * cond check rather than time out) */
+ if (atomic_read(&counter) % 5)
+ send_kernel_message(cpu, __test_rv_sleeper_timeout, i * 4, 0, 0,
+ KMSG_ROUTINE);
+ else
+ send_kernel_message(cpu, __test_rv_sleeper, 0, 0, 0, KMSG_ROUTINE);
+ }
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ state = TRUE;
+ while (atomic_read(&counter)) {
+ cpu_relax();
+ rendez_wakeup(rv);
+ udelay(1000000);
+ kthread_yield(); /* run whatever messages we sent to ourselves */
+ }
+ assert(!rv->cv.nr_waiters);
+ printk("test_rv: lots of sleepers/timeouts complete\n");
+}