/* Copyright (c) 2011 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel side of ucqs. */

   7#include <ucq.h>
   8#include <umem.h>
   9#include <assert.h>
  10#include <mm.h>
  11#include <atomic.h>
  12
/* Writes *msg into the next free slot of the user-mapped ucq.
 *
 * Proc p needs to be current, and you should have checked that ucq is valid
 * memory.  We'll assert it here, to catch any of your bugs.  =)
 *
 * A "slot" packs a ucq page's address in its upper bits and a per-page message
 * index in the page-offset bits (hence the PTE_ADDR()/PGOFF() extraction
 * below); slot2msg() turns one into a message pointer, and slot_is_good()
 * tells us whether the index still fits on the page.
 *
 * Fast path: atomically claim a slot off prod_idx and fill it in.  Slow path
 * (page full): take the per-proc/ucq hashlock, install a fresh page — the
 * cached spare_pg if the user left one, otherwise a new anonymous mmap in p's
 * address space — and reset prod_idx to its first slot.  Every user-supplied
 * address is checked before the kernel derefs it. */
void send_ucq_msg(struct ucq *ucq, struct proc *p, struct event_msg *msg)
{
        uintptr_t my_slot = 0;
        struct ucq_page *new_page, *old_page;
        struct msg_container *my_msg;

        assert(is_user_rwaddr(ucq, sizeof(struct ucq)));
        /* So we can try to send ucqs to _Ss before they initialize */
        if (!ucq->ucq_ready) {
                if (__proc_is_mcp(p))
                        warn("proc %d is _M with an uninitialized ucq %p\n",
                             p->pid, ucq);
                return;
        }
        /* Bypass fetching/incrementing the counter if we're overflowing, helps
         * prevent wraparound issues on the counter (only 12 bits of counter) */
        if (ucq->prod_overflow)
                goto grab_lock;
        /* Grab a potential slot (fast path: no lock taken) */
        my_slot = (uintptr_t)atomic_fetch_and_add(&ucq->prod_idx, 1);
        if (slot_is_good(my_slot))
                goto have_slot;
        /* Warn others to not bother with the fetch_and_add */
        ucq->prod_overflow = TRUE;
        /* Sanity check: the index portion should never get anywhere near this
         * high; if it does, producers have been spinning on a full page */
        if (PGOFF(my_slot) > 3000)
                warn("Abnormally high counter, something is wrong!");
grab_lock:
        /* Lock, for this proc/ucq.  Using an irqsave, since we may want to send
         * ucq messages from irq context.  Whoever wins the lock does the page
         * replacement below; the losers retry the fetch_and_add under the lock
         * and usually succeed right away once the winner reset prod_idx. */
        hash_lock_irqsave(p->ucq_hashlock, (long)ucq);
        /* Grab a potential slot (again, preventing another DoS) */
        my_slot = (uintptr_t)atomic_fetch_and_add(&ucq->prod_idx, 1);
        if (slot_is_good(my_slot))
                goto unlock_lock;
        /* Check to make sure the old_page was good before we do anything too
         * intense (we deref it later).  Bad pages are likely due to
         * user-malfeasance or neglect.
         *
         * The is_user_rwaddr() check on old_page might catch addresses below
         * MMAP_LOWEST_VA, and we can also handle a PF, but we'll explicitly
         * check for 0 just to be sure (and it's a likely error). */
        old_page = (struct ucq_page*)PTE_ADDR(my_slot);
        if (!is_user_rwaddr(old_page, PGSIZE) || !old_page)
                goto error_addr_unlock;
        /* Things still aren't fixed, so we need to reset everything */
        /* Try to get the spare page, so we don't have to mmap a new one */
        new_page = (struct ucq_page*)atomic_swap(&ucq->spare_pg, 0);
        if (!new_page) {
                /* Warn if we have a ridiculous amount of pages in the ucq */
                if (atomic_fetch_and_add(&ucq->nr_extra_pgs, 1) >
                    UCQ_WARN_THRESH)
                        warn("Over %d pages in ucq %p for pid %d!\n",
                             UCQ_WARN_THRESH, ucq, p->pid);
                /* Giant warning: don't ask for anything other than anonymous
                 * memory at a non-fixed location.  o/w, it may cause a TLB
                 * shootdown, which grabs the proc_lock, and potentially
                 * deadlock the system. */
                new_page = (struct ucq_page*)do_mmap(p, 0, PGSIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_ANONYMOUS |
                                                     MAP_POPULATE | MAP_PRIVATE,
                                                     NULL, 0);
                assert(new_page);
                assert(!PGOFF(new_page));
        } else {
                /* If we're using the user-supplied new_page, we need to check
                 * it (must be a page-aligned, user RW address) */
                if (!is_user_rwaddr(new_page, PGSIZE) || PGOFF(new_page))
                        goto error_addr_unlock;
        }
        /* Now we have a page.  Lets make sure it's set up properly */
        new_page->header.cons_next_pg = 0;
        new_page->header.nr_cons = 0;
        /* Link the old page to the new one, so consumers know how to follow */
        old_page->header.cons_next_pg = (uintptr_t)new_page;
        /* Set the prod_idx counter to 1 (and the new_page), reserving the first
         * slot (number '0') for us (reservation prevents DoS). */
        my_slot = (uintptr_t)new_page;
        atomic_set(&ucq->prod_idx, my_slot + 1);
        /* Fallthrough to clear overflow and unlock */
unlock_lock:
        /* Clear the overflow, so new producers will try to get a slot */
        ucq->prod_overflow = FALSE;
        /* At this point, any normal (non-locking) producers can succeed in
         * getting a slot.  The ones that failed earlier will fight for the
         * lock, then quickly proceed when they get a good slot */
        hash_unlock_irqsave(p->ucq_hashlock, (long)ucq);
        /* Fall through to having a slot */
have_slot:
        /* Sanity check on our slot. */
        assert(slot_is_good(my_slot));
        /* Convert slot to actual msg_container.  Note we never actually deref
         * my_slot here (o/w we'd need a rw_addr check). */
        my_msg = slot2msg(my_slot);
        /* Make sure our msg is user RW */
        if (!is_user_rwaddr(my_msg, sizeof(struct msg_container)))
                goto error_addr;
        /* Finally write the message */
        my_msg->ev_msg = *msg;
        wmb();
        /* Now that the write is done, signal to the consumer that they can
         * consume our message (they could have been spinning on it).  The
         * wmb() above orders the payload store before this flag. */
        my_msg->ready = TRUE;
        return;
error_addr_unlock:
        /* Had a bad addr while holding the lock.  This is a bit more serious */
        warn("Bad addr in ucq page management!");
        ucq->prod_overflow = FALSE;
        hash_unlock_irqsave(p->ucq_hashlock, (long)ucq);
        /* Fall-through to normal error out */
error_addr:
        warn("Invalid user address, not sending a message");
        /* TODO: consider killing the process here.  For now, just catch it.
         * For some cases, we have a slot that we never fill in, though if we
         * had a bad addr, none of this will work out and the kernel just needs
         * to protect itself. */
}
 133
 134/* Debugging */
 135#include <smp.h>
 136#include <pmap.h>
 137
 138/* Prints the status and up to 25 of the previous messages for the UCQ. */
 139void print_ucq(struct proc *p, struct ucq *ucq)
 140{
 141        struct ucq_page *ucq_pg;
 142        uintptr_t old_proc = switch_to(p);
 143
 144        printk("UCQ %p\n", ucq);
 145        printk("prod_idx: %p, cons_idx: %p\n", atomic_read(&ucq->prod_idx),
 146               atomic_read(&ucq->cons_idx));
 147        printk("spare_pg: %p, nr_extra_pgs: %d\n", atomic_read(&ucq->spare_pg),
 148               atomic_read(&ucq->nr_extra_pgs));
 149        printk("prod_overflow: %d\n", ucq->prod_overflow);
 150        /* Try to see our previous ucqs */
 151        for (uintptr_t i = atomic_read(&ucq->prod_idx), count = 0;
 152             slot_is_good(i) && count < 25;  i--, count++) {
 153                /* only attempt to print messages on the same page */
 154                if (PTE_ADDR(i) != PTE_ADDR(atomic_read(&ucq->prod_idx)))
 155                        break;
 156                printk("Prod idx %p message ready is %p\n", i,
 157                       slot2msg(i)->ready);
 158        }
 159        /* look at the chain, starting from cons_idx */
 160        ucq_pg = (struct ucq_page*)PTE_ADDR(atomic_read(&ucq->cons_idx));
 161        for (int i = 0; i < 10 && ucq_pg; i++) {
 162                printk("#%02d: Cons page: %p, nr_cons: %d, next page: %p\n", i,
 163                       ucq_pg, ucq_pg->header.nr_cons,
 164                       ucq_pg->header.cons_next_pg);
 165                ucq_pg = (struct ucq_page*)(ucq_pg->header.cons_next_pg);
 166        }
 167        switch_back(p, old_proc);
 168}
 169