qio: Add non-blocking queues
[akaros.git] / kern / src / ns / qio.c
index d41ccf5..4741f6c 100644 (file)
 #include <smp.h>
 #include <ip.h>
 
+/* Panic if block b carries any extra_data; used to fence off code paths that
+ * have not yet been converted to handle extra data buffers. */
+#define PANIC_EXTRA(b)                                                          \
+{                                                                              \
+       if ((b)->extra_len)                                                        \
+               panic("%s doesn't handle extra_data", __FUNCTION__);               \
+}
+
 static uint32_t padblockcnt;
 static uint32_t concatblockcnt;
 static uint32_t pullupblockcnt;
@@ -38,9 +44,8 @@ struct queue {
        int len;                                        /* bytes allocated to queue */
        int dlen;                                       /* data bytes in queue */
        int limit;                                      /* max bytes in queue */
-       int iNULLim;                            /* initial limit */
+       int inilim;                             /* initial limit */
        int state;
-       int noblock;                            /* true if writes return immediately when q full */
        int eof;                                        /* number of eofs read by user */
 
        void (*kick) (void *);          /* restart output */
@@ -92,14 +97,20 @@ struct block *padblock(struct block *bp, int size)
 {
        int n;
        struct block *nbp;
+       uint8_t bcksum = bp->flag & BCKSUM_FLAGS;
+       uint16_t checksum_start = bp->checksum_start;
+       uint16_t checksum_offset = bp->checksum_offset;
+       uint16_t mss = bp->mss;
 
        QDEBUG checkb(bp, "padblock 1");
        if (size >= 0) {
                if (bp->rp - bp->base >= size) {
+                       bp->checksum_start += size;
                        bp->rp -= size;
                        return bp;
                }
 
+               PANIC_EXTRA(bp);
                if (bp->next)
                        panic("padblock %p", getcallerpc(&bp));
                n = BLEN(bp);
@@ -114,6 +125,8 @@ struct block *padblock(struct block *bp, int size)
        } else {
                size = -size;
 
+               PANIC_EXTRA(bp);
+
                if (bp->next)
                        panic("padblock %p", getcallerpc(&bp));
 
@@ -127,6 +140,12 @@ struct block *padblock(struct block *bp, int size)
                nbp->wp += n;
                freeb(bp);
        }
+       if (bcksum) {
+               nbp->flag |= bcksum;
+               nbp->checksum_start = checksum_start;
+               nbp->checksum_offset = checksum_offset;
+               nbp->mss = mss;
+       }
        QDEBUG checkb(nbp, "padblock 1");
        return nbp;
 }
@@ -173,6 +192,8 @@ struct block *concatblock(struct block *bp)
        if (bp->next == 0)
                return bp;
 
+       /* probably use parts of qclone */
+       PANIC_EXTRA(bp);
        nb = allocb(blocklen(bp));
        for (f = bp; f; f = f->next) {
                len = BLEN(f);
@@ -185,20 +206,93 @@ struct block *concatblock(struct block *bp)
        return nb;
 }
 
+/* Returns a block with the remaining contents of b all in the main body of the
+ * returned block.  Replace old references to b with the returned value (which
+ * may still be 'b', if no change was needed). */
+struct block *linearizeblock(struct block *b)
+{
+       struct block *newb;
+       size_t len;
+       struct extra_bdata *ebd;
+
+       if (!b->extra_len)
+               return b;
+
+       /* BLEN covers main body plus extra_len, so newb can hold everything */
+       newb = allocb(BLEN(b));
+       len = BHLEN(b);
+       memcpy(newb->wp, b->rp, len);
+       newb->wp += len;
+       /* flatten each populated extra buffer into newb's main body; len tracks
+        * how many extra bytes remain to copy */
+       len = b->extra_len;
+       for (int i = 0; (i < b->nr_extra_bufs) && len; i++) {
+               ebd = &b->extra_data[i];
+               if (!ebd->base || !ebd->len)
+                       continue;
+               memcpy(newb->wp, (void*)(ebd->base + ebd->off), ebd->len);
+               newb->wp += ebd->len;
+               len -= ebd->len;
+       }
+       /* TODO: any other flags that need to be copied over? */
+       if (b->flag & BCKSUM_FLAGS) {
+               newb->flag |= (b->flag & BCKSUM_FLAGS);
+               newb->checksum_start = b->checksum_start;
+               newb->checksum_offset = b->checksum_offset;
+               newb->mss = b->mss;
+       }
+       freeb(b);
+       return newb;
+}
+
 /*
- *  make sure the first block has at least n bytes
+ *  make sure the first block has at least n bytes in its main body
  */
 struct block *pullupblock(struct block *bp, int n)
 {
-       int i;
+       int i, len, seglen;
        struct block *nbp;
+       struct extra_bdata *ebd;
 
        /*
         *  this should almost always be true, it's
         *  just to avoid every caller checking.
         */
-       if (BLEN(bp) >= n)
+       if (BHLEN(bp) >= n)
+               return bp;
+
+        /* a start at explicit main-body / header management */
+       if (bp->extra_len) {
+               if (n > bp->lim - bp->rp) {
+                       /* would need to realloc a new block and copy everything over. */
+                       panic("can't pullup %d bytes, no place to put it: bp->lim %p, bp->rp %p, bp->lim-bp->rp %d\n",
+                                       n, bp->lim, bp->rp, bp->lim-bp->rp);
+               }
+               len = n - BHLEN(bp);
+               if (len > bp->extra_len)
+                       panic("pullup more than extra (%d, %d, %d)\n",
+                             n, BHLEN(bp), bp->extra_len);
+               checkb(bp, "before pullup");
+               for (int i = 0; (i < bp->nr_extra_bufs) && len; i++) {
+                       ebd = &bp->extra_data[i];
+                       if (!ebd->base || !ebd->len)
+                               continue;
+                       seglen = MIN(ebd->len, len);
+                       memcpy(bp->wp, (void*)(ebd->base + ebd->off), seglen);
+                       bp->wp += seglen;
+                       len -= seglen;
+                       ebd->len -= seglen;
+                       ebd->off += seglen;
+                       bp->extra_len -= seglen;
+                       if (ebd->len == 0) {
+                               kfree((void *)ebd->base);
+                               ebd->off = 0;
+                               ebd->base = 0;
+                       }
+               }
+               /* maybe just call pullupblock recursively here */
+               if (len)
+                       panic("pullup %d bytes overdrawn\n", len);
+               checkb(bp, "after pullup");
                return bp;
+       }
 
        /*
         *  if not enough room in the first block,
@@ -242,13 +336,14 @@ struct block *pullupblock(struct block *bp, int n)
 }
 
 /*
- *  make sure the first block has at least n bytes
+ *  make sure the first block has at least n bytes in its main body
  */
 struct block *pullupqueue(struct queue *q, int n)
 {
        struct block *b;
 
-       if ((BLEN(q->bfirst) >= n))
+       /* TODO: lock to protect the queue links? */
+       if ((BHLEN(q->bfirst) >= n))
                return q->bfirst;
        q->bfirst = pullupblock(q->bfirst, n);
        for (b = q->bfirst; b != NULL && b->next != NULL; b = b->next) ;
@@ -256,13 +351,105 @@ struct block *pullupqueue(struct queue *q, int n)
        return q->bfirst;
 }
 
+/* Throw away up to count bytes from the front of a
+ * block's extra data.  Returns the number of bytes
+ * actually thrown away.
+ */
+
+static int pullext(struct block *bp, int count)
+{
+       struct extra_bdata *ed;
+       int i, rem, bytes = 0;
+
+       for (i = 0; bp->extra_len && count && i < bp->nr_extra_bufs; i++) {
+               ed = &bp->extra_data[i];
+               rem = MIN(count, ed->len);
+               bp->extra_len -= rem;
+               count -= rem;
+               bytes += rem;
+               ed->off += rem;
+               ed->len -= rem;
+               if (ed->len == 0) {
+                       /* fully consumed: release our ref and clear the entry */
+                       kfree((void *)ed->base);
+                       ed->base = 0;
+                       ed->off = 0;
+               }
+       }
+       return bytes;
+}
+
+/* Throw away up to count bytes from the end of a
+ * block's extra data.  Returns the number of bytes
+ * actually thrown away.
+ */
+
+static int dropext(struct block *bp, int count)
+{
+       struct extra_bdata *ed;
+       int i, rem, bytes = 0;
+
+       /* walk the extra bufs backwards, trimming from the tail */
+       for (i = bp->nr_extra_bufs - 1; bp->extra_len && count && i >= 0; i--) {
+               ed = &bp->extra_data[i];
+               rem = MIN(count, ed->len);
+               bp->extra_len -= rem;
+               count -= rem;
+               bytes += rem;
+               ed->len -= rem;
+               if (ed->len == 0) {
+                       /* fully consumed: release our ref and clear the entry */
+                       kfree((void *)ed->base);
+                       ed->base = 0;
+                       ed->off = 0;
+               }
+       }
+       return bytes;
+}
+
+/*
+ *  throw away up to count bytes from a
+ *  list of blocks.  Return count of bytes
+ *  thrown away.
+ *
+ *  If 'free' is set, a fully-drained block is always freed; otherwise it is
+ *  only freed when bytes remain to pull (count != 0), so the caller keeps
+ *  the final, emptied block in the list.
+ */
+static int _pullblock(struct block **bph, int count, int free)
+{
+       struct block *bp;
+       int n, bytes;
+
+       bytes = 0;
+       if (bph == NULL)
+               return 0;
+
+       while (*bph != NULL && count != 0) {
+               bp = *bph;
+
+               /* consume the main body first, then the extra_data bufs */
+               n = MIN(BHLEN(bp), count);
+               bytes += n;
+               count -= n;
+               bp->rp += n;
+               n = pullext(bp, count);
+               bytes += n;
+               count -= n;
+               QDEBUG checkb(bp, "pullblock ");
+               if (BLEN(bp) == 0 && (free || count)) {
+                       *bph = bp->next;
+                       bp->next = NULL;
+                       freeb(bp);
+               }
+       }
+       return bytes;
+}
+
+/* Throw away up to count bytes from a list of blocks, freeing any block that
+ * is completely drained.  Returns the number of bytes thrown away. */
+int pullblock(struct block **bph, int count)
+{
+       return _pullblock(bph, count, 1);
+}
+
 /*
  *  trim to len bytes starting at offset
  */
 struct block *trimblock(struct block *bp, int offset, int len)
 {
-       uint32_t l;
-       struct block *nb, *startb;
+       uint32_t l, trim;
+       int olen = len;
 
        QDEBUG checkb(bp, "trimblock 1");
        if (blocklen(bp) < offset + len) {
@@ -270,30 +457,28 @@ struct block *trimblock(struct block *bp, int offset, int len)
                return NULL;
        }
 
-       while ((l = BLEN(bp)) < offset) {
-               offset -= l;
-               nb = bp->next;
-               bp->next = NULL;
-               freeb(bp);
-               bp = nb;
+       l =_pullblock(&bp, offset, 0);
+       if (bp == NULL)
+               return NULL;
+       if (l != offset) {
+               freeblist(bp);
+               return NULL;
        }
 
-       startb = bp;
-       bp->rp += offset;
-
        while ((l = BLEN(bp)) < len) {
                len -= l;
                bp = bp->next;
        }
 
-       bp->wp -= (BLEN(bp) - len);
+       trim = BLEN(bp) - len;
+       trim -= dropext(bp, trim);
+       bp->wp -= trim;
 
        if (bp->next) {
                freeblist(bp->next);
                bp->next = NULL;
        }
-
-       return startb;
+       return bp;
 }
 
 /*
@@ -306,6 +491,13 @@ struct block *copyblock(struct block *bp, int count)
 
        QDEBUG checkb(bp, "copyblock 0");
        nbp = allocb(count);
+       if (bp->flag & BCKSUM_FLAGS) {
+               nbp->flag |= (bp->flag & BCKSUM_FLAGS);
+               nbp->checksum_start = bp->checksum_start;
+               nbp->checksum_offset = bp->checksum_offset;
+               nbp->mss = bp->mss;
+       }
+       PANIC_EXTRA(bp);
        for (; count > 0 && bp != 0; bp = bp->next) {
                l = BLEN(bp);
                if (l > count)
@@ -334,6 +526,7 @@ struct block *adjustblock(struct block *bp, int len)
                return NULL;
        }
 
+       PANIC_EXTRA(bp);
        if (bp->rp + len > bp->lim) {
                nbp = copyblock(bp, len);
                freeblist(bp);
@@ -351,37 +544,6 @@ struct block *adjustblock(struct block *bp, int len)
        return bp;
 }
 
-/*
- *  throw away up to count bytes from a
- *  list of blocks.  Return count of bytes
- *  thrown away.
- */
-int pullblock(struct block **bph, int count)
-{
-       struct block *bp;
-       int n, bytes;
-
-       bytes = 0;
-       if (bph == NULL)
-               return 0;
-
-       while (*bph != NULL && count != 0) {
-               bp = *bph;
-               n = BLEN(bp);
-               if (count < n)
-                       n = count;
-               bytes += n;
-               count -= n;
-               bp->rp += n;
-               QDEBUG checkb(bp, "pullblock ");
-               if (BLEN(bp) == 0) {
-                       *bph = bp->next;
-                       bp->next = NULL;
-                       freeb(bp);
-               }
-       }
-       return bytes;
-}
 
 /*
  *  get next block from a queue, return null if nothing there
@@ -428,7 +590,8 @@ struct block *qget(struct queue *q)
 int qdiscard(struct queue *q, int len)
 {
        struct block *b;
-       int dowakeup, n, sofar;
+       int dowakeup, n, sofar, body_amt, extra_amt;
+       struct extra_bdata *ebd;
 
        spin_lock_irqsave(&q->lock);
        for (sofar = 0; sofar < len; sofar += n) {
@@ -445,8 +608,32 @@ int qdiscard(struct queue *q, int len)
                        freeb(b);
                } else {
                        n = len - sofar;
-                       b->rp += n;
                        q->dlen -= n;
+                       /* partial block removal */
+                       body_amt = MIN(BHLEN(b), n);
+                       b->rp += body_amt;
+                       extra_amt = n - body_amt;
+                       /* reduce q->len by the amount we remove from the extras.  The
+                        * header will always be accounted for above, during block removal.
+                        * */
+                       q->len -= extra_amt;
+                       for (int i = 0; (i < b->nr_extra_bufs) && extra_amt; i++) {
+                               ebd = &b->extra_data[i];
+                               if (!ebd->base || !ebd->len)
+                                       continue;
+                               if (extra_amt >= ebd->len) {
+                                       /* remove the entire entry, note the kfree release */
+                                       b->extra_len -= ebd->len;
+                                       extra_amt -= ebd->len;
+                                       kfree((void*)ebd->base);
+                                       ebd->base = ebd->off = ebd->len = 0;
+                                       continue;
+                               }
+                               ebd->off += extra_amt;
+                               ebd->len -= extra_amt;
+                               b->extra_len -= extra_amt;
+                               extra_amt = 0;
+                       }
                }
        }
 
@@ -507,6 +694,7 @@ int qconsume(struct queue *q, void *vp, int len)
                tofree = b;
        };
 
+       PANIC_EXTRA(b);
        if (n < len)
                len = n;
        memmove(p, b->rp, len);
@@ -652,6 +840,8 @@ struct block *packblock(struct block *bp)
        struct block **l, *nbp;
        int n;
 
+       if (bp->extra_len)
+               return bp;
        for (l = &bp; *l; l = &(*l)->next) {
                nbp = *l;
                n = BLEN(nbp);
@@ -705,6 +895,7 @@ int qproduce(struct queue *q, void *vp, int len)
                /* b->next = 0; done by iallocb() */
                q->len += BALLOC(b);
        }
+       PANIC_EXTRA(b);
        memmove(b->wp, p, len);
        producecnt += len;
        b->wp += len;
@@ -726,10 +917,166 @@ int qproduce(struct queue *q, void *vp, int len)
        return len;
 }
 
+/* Add an extra_data entry to newb at newb_idx pointing to b's body, starting at
+ * body_rp, for up to len.  Returns the len consumed.
+ *
+ * The base is 'b', so that we can kfree it later.  This currently ties us to
+ * using kfree for the release method for all extra_data.
+ *
+ * It is possible to have a body size that is 0, if there is no offset, and
+ * b->wp == b->rp.  This will have an extra data entry of 0 length. */
+static size_t point_to_body(struct block *b, uint8_t *body_rp,
+                            struct block *newb, unsigned int newb_idx,
+                            size_t len)
+{
+       struct extra_bdata *ebd = &newb->extra_data[newb_idx];
+
+       assert(newb_idx < newb->nr_extra_bufs);
+
+       /* take a ref on b itself; the entry's eventual kfree drops it */
+       kmalloc_incref(b);
+       ebd->base = (uintptr_t)b;
+       ebd->off = (uint32_t)(body_rp - (uint8_t*)b);
+       ebd->len = MIN(b->wp - body_rp, len);   /* think of body_rp as b->rp */
+       assert((int)ebd->len >= 0);
+       newb->extra_len += ebd->len;
+       return ebd->len;
+}
+
+/* Add an extra_data entry to newb at newb_idx pointing to b's b_idx'th
+ * extra_data buf, at b_off within that buffer, for up to len.  Returns the len
+ * consumed.
+ *
+ * We can have blocks with 0 length, but they are still refcnt'd.  See above.
+ *
+ * NOTE(review): unlike point_to_body, there is no sanity assert that
+ * b_ebd->len >= b_off (i.e. that n_ebd->len did not underflow) — confirm
+ * callers guarantee b_off is in range. */
+static size_t point_to_buf(struct block *b, unsigned int b_idx, uint32_t b_off,
+                           struct block *newb, unsigned int newb_idx,
+                           size_t len)
+{
+       struct extra_bdata *n_ebd = &newb->extra_data[newb_idx];
+       struct extra_bdata *b_ebd = &b->extra_data[b_idx];
+
+       assert(b_idx < b->nr_extra_bufs);
+       assert(newb_idx < newb->nr_extra_bufs);
+
+       /* share the underlying buffer: both entries kfree the same base */
+       kmalloc_incref((void*)b_ebd->base);
+       n_ebd->base = b_ebd->base;
+       n_ebd->off = b_ebd->off + b_off;
+       n_ebd->len = MIN(b_ebd->len - b_off, len);
+       newb->extra_len += n_ebd->len;
+       return n_ebd->len;
+}
+
+/* given a string of blocks, fills the new block's extra_data with the contents
+ * of the blist [offset, len + offset)
+ *
+ * returns 0 on success.  the only failure is if the extra_data array was too
+ * small, so this returns a positive integer saying how big the extra_data needs
+ * to be.
+ *
+ * callers are responsible for protecting the list structure. */
+static int __blist_clone_to(struct block *blist, struct block *newb, int len,
+                            uint32_t offset)
+{
+       struct block *b, *first;
+       unsigned int nr_bufs = 0;
+       unsigned int b_idx, newb_idx = 0;
+       /* NOTE(review): first_main_body is never used below — candidate for
+        * removal */
+       uint8_t *first_main_body = 0;
+
+       /* find the first block; keep offset relative to the latest b in the list */
+       for (b = blist; b; b = b->next) {
+               if (BLEN(b) > offset)
+                       break;
+               offset -= BLEN(b);
+       }
+       /* qcopy semantics: if you asked for an offset outside the block list, you
+        * get an empty block back */
+       if (!b)
+               return 0;
+       first = b;
+       /* upper bound for how many buffers we'll need in newb */
+       for (/* b is set*/; b; b = b->next) {
+               nr_bufs += 1 + b->nr_extra_bufs;        /* 1 for the main body */
+       }
+       /* we might be holding a spinlock here, so we won't wait for kmalloc */
+       block_add_extd(newb, nr_bufs, 0);
+       if (newb->nr_extra_bufs < nr_bufs) {
+               /* caller will need to alloc these, then re-call us */
+               return nr_bufs;
+       }
+       for (b = first; b && len; b = b->next) {
+               b_idx = 0;
+               if (offset) {
+                       if (offset < BHLEN(b)) {
+                               /* off is in the main body */
+                               len -= point_to_body(b, b->rp + offset, newb, newb_idx, len);
+                               newb_idx++;
+                       } else {
+                               /* off is in one of the buffers (or just past the last one).
+                                * we're not going to point to b's main body at all. */
+                               offset -= BHLEN(b);
+                               assert(b->extra_data);
+                               /* assuming these extrabufs are packed, or at least that len
+                                * isn't gibberish */
+                               while (b->extra_data[b_idx].len <= offset) {
+                                       offset -= b->extra_data[b_idx].len;
+                                       b_idx++;
+                               }
+                               /* now offset is set to our offset in the b_idx'th buf */
+                               len -= point_to_buf(b, b_idx, offset, newb, newb_idx, len);
+                               newb_idx++;
+                               b_idx++;
+                       }
+                       /* offset only applies to the first block we touch */
+                       offset = 0;
+               } else {
+                       len -= point_to_body(b, b->rp, newb, newb_idx, len);
+                       newb_idx++;
+               }
+               /* knock out all remaining bufs.  we only did one point_to_ op by now,
+                * and any point_to_ could be our last if it consumed all of len. */
+               for (int i = b_idx; (i < b->nr_extra_bufs) && len; i++) {
+                       len -= point_to_buf(b, i, 0, newb, newb_idx, len);
+                       newb_idx++;
+               }
+       }
+       return 0;
+}
+
+/* Clone [offset, len + offset) of block list blist into a single new block
+ * with header_len reserved in its main body; the data is referenced (not
+ * copied) via extra_data.  Retries with a larger extra_data array (blocking
+ * kmalloc) whenever __blist_clone_to reports it was too small. */
+struct block *blist_clone(struct block *blist, int header_len, int len,
+                          uint32_t offset)
+{
+       int ret;
+       struct block *newb = allocb(header_len);
+       do {
+               ret = __blist_clone_to(blist, newb, len, offset);
+               if (ret)
+                       block_add_extd(newb, ret, KMALLOC_WAIT);
+       } while (ret);
+       return newb;
+}
+
+/* given a queue, makes a single block with header_len reserved space in the
+ * block main body, and the contents of [offset, len + offset) pointed to in the
+ * new block's ext_data. */
+struct block *qclone(struct queue *q, int header_len, int len, uint32_t offset)
+{
+       int ret;
+       struct block *newb = allocb(header_len);
+       /* the while loop should rarely be used: it would require someone
+        * concurrently adding to the queue. */
+       do {
+               /* TODO: RCU: protecting the q list (b->next) (need read lock) */
+               spin_lock_irqsave(&q->lock);
+               ret = __blist_clone_to(q->bfirst, newb, len, offset);
+               spin_unlock_irqsave(&q->lock);
+               /* grow the extra_data array outside the lock, then retry */
+               if (ret)
+                       block_add_extd(newb, ret, KMALLOC_WAIT);
+       } while (ret);
+       return newb;
+}
+
 /*
  *  copy from offset in the queue
  */
-struct block *qcopy(struct queue *q, int len, uint32_t offset)
+struct block *qcopy_old(struct queue *q, int len, uint32_t offset)
 {
        int sofar;
        int n;
@@ -761,6 +1108,7 @@ struct block *qcopy(struct queue *q, int len, uint32_t offset)
        for (sofar = 0; sofar < len;) {
                if (n > len - sofar)
                        n = len - sofar;
+               PANIC_EXTRA(b);
                memmove(nb->wp, p, n);
                qcopycnt += n;
                sofar += n;
@@ -776,6 +1124,15 @@ struct block *qcopy(struct queue *q, int len, uint32_t offset)
        return nb;
 }
 
+/* Copy len bytes starting at offset out of queue q into a new block.  With
+ * CONFIG_BLOCK_EXTRAS the data is shared by reference via qclone; otherwise it
+ * falls back to the copying implementation in qcopy_old. */
+struct block *qcopy(struct queue *q, int len, uint32_t offset)
+{
+#ifdef CONFIG_BLOCK_EXTRAS
+       return qclone(q, 0, len, offset);
+#else
+       return qcopy_old(q, len, offset);
+#endif
+}
+
 static void qinit_common(struct queue *q)
 {
        spinlock_init_irqsave(&q->lock);
@@ -797,13 +1154,12 @@ struct queue *qopen(int limit, int msg, void (*kick) (void *), void *arg)
                return 0;
        qinit_common(q);
 
-       q->limit = q->iNULLim = limit;
+       q->limit = q->inilim = limit;
        q->kick = kick;
        q->arg = arg;
        q->state = msg;
        q->state |= Qstarve;
        q->eof = 0;
-       q->noblock = 0;
 
        return q;
 }
@@ -833,11 +1189,12 @@ static int notempty(void *a)
        return (q->state & Qclosed) || q->bfirst != 0;
 }
 
-/* wait for the queue to be non-empty or closed.
+/* Wait for the queue to be non-empty or closed.  Returns TRUE for a successful
+ * wait, FALSE on Qclose (without error)
  *
- * called with q ilocked.  rendez may error out, back through the caller, with
+ * Called with q ilocked.  May error out, back through the caller, with
  * the irqsave lock unlocked.  */
-static int qwait(struct queue *q)
+static bool qwait(struct queue *q)
 {
        /* wait for data */
        for (;;) {
@@ -845,20 +1202,28 @@ static int qwait(struct queue *q)
                        break;
 
                if (q->state & Qclosed) {
-                       if (++q->eof > 3)
-                               return -1;
-                       if (*q->err && strcmp(q->err, Ehungup) != 0)
-                               return -1;
-                       return 0;
+                       if (++q->eof > 3) {
+                               spin_unlock_irqsave(&q->lock);
+                               error("multiple reads on a closed queue");
+                       }
+                       if (*q->err && strcmp(q->err, Ehungup) != 0) {
+                               spin_unlock_irqsave(&q->lock);
+                               error(q->err);
+                       }
+                       return FALSE;
+               }
+               if (q->state & Qnonblock) {
+                       spin_unlock_irqsave(&q->lock);
+                       set_errno(EAGAIN);
+                       error("queue empty");
                }
-
                q->state |= Qstarve;    /* flag requesting producer to wake me */
                spin_unlock_irqsave(&q->lock);
                /* may throw an error() */
                rendez_sleep(&q->rr, notempty, q);
                spin_lock_irqsave(&q->lock);
        }
-       return 1;
+       return TRUE;
 }
 
 /*
@@ -866,6 +1231,7 @@ static int qwait(struct queue *q)
  */
 void qaddlist(struct queue *q, struct block *b)
 {
+       /* TODO: q lock? */
        /* queue the block */
        if (q->bfirst)
                q->blast->next = b;
@@ -896,6 +1262,45 @@ struct block *qremove(struct queue *q)
        return b;
 }
 
+/* Copies up to amt bytes out of b — main body first, then the extra_data
+ * buffers — into 'to', consuming what it copies.  Returns the number of bytes
+ * copied. */
+static size_t read_from_block(struct block *b, uint8_t *to, size_t amt)
+{
+       size_t copy_amt, retval = 0;
+       struct extra_bdata *ebd;
+       
+       copy_amt = MIN(BHLEN(b), amt);
+       memcpy(to, b->rp, copy_amt);
+       /* advance the rp, since this block may not be completely consumed and
+        * future reads need to know where to pick up from */
+       b->rp += copy_amt;
+       to += copy_amt;
+       amt -= copy_amt;
+       retval += copy_amt;
+       for (int i = 0; (i < b->nr_extra_bufs) && amt; i++) {
+               ebd = &b->extra_data[i];
+               /* skip empty entries.  if we track this in the struct block, we can
+                * just start the for loop early */
+               if (!ebd->base || !ebd->len)
+                       continue;
+               copy_amt = MIN(ebd->len, amt);
+               memcpy(to, (void*)(ebd->base + ebd->off), copy_amt);
+               /* we're actually consuming the entries, just like how we advance rp up
+                * above, and might only consume part of one. */
+               ebd->len -= copy_amt;
+               ebd->off += copy_amt;
+               b->extra_len -= copy_amt;
+               if (!ebd->len) {
+                       /* we don't actually have to decref here.  it's also done in
+                        * freeb().  this is the earliest we can free. */
+                       kfree((void*)ebd->base);
+                       ebd->base = ebd->off = 0;
+               }
+               to += copy_amt;
+               amt -= copy_amt;
+               retval += copy_amt;
+       }
+       return retval;
+}
+
 /*
  *  copy the contents of a string of blocks into
  *  memory.  emptied blocks are freed.  return
@@ -906,17 +1311,18 @@ struct block *bl2mem(uint8_t * p, struct block *b, int n)
        int i;
        struct block *next;
 
+       /* could be slicker here, since read_from_block is smart */
        for (; b != NULL; b = next) {
                i = BLEN(b);
                if (i > n) {
-                       memmove(p, b->rp, n);
-                       b->rp += n;
+                       /* partial block, consume some */
+                       read_from_block(b, p, n);
                        return b;
                }
-               memmove(p, b->rp, i);
+               /* full block, consume all and move on */
+               i = read_from_block(b, p, i);
                n -= i;
                p += i;
-               b->rp += i;
                next = b->next;
                freeb(b);
        }
@@ -945,6 +1351,7 @@ struct block *mem2bl(uint8_t * p, int len)
                        n = Maxatomic;
 
                *l = b = allocb(n);
+               /* TODO consider extra_data */
                memmove(b->wp, p, n);
                b->wp += n;
                p += n;
@@ -1010,17 +1417,12 @@ struct block *qbread(struct queue *q, int len)
        }
 
        spin_lock_irqsave(&q->lock);
-       switch (qwait(q)) {
-               case 0:
-                       /* queue closed */
-                       spin_unlock_irqsave(&q->lock);
-                       qunlock(&q->rlock);
-                       poperror();
-                       return NULL;
-               case -1:
-                       /* multiple reads on a closed queue */
-                       spin_unlock_irqsave(&q->lock);
-                       error(q->err);
+       if (!qwait(q)) {
+               /* queue closed */
+               spin_unlock_irqsave(&q->lock);
+               qunlock(&q->rlock);
+               poperror();
+               return NULL;
        }
 
        /* if we get here, there's at least one block in the queue */
@@ -1030,6 +1432,7 @@ struct block *qbread(struct queue *q, int len)
        /* split block if it's too big and this is not a message queue */
        nb = b;
        if (n > len) {
+               PANIC_EXTRA(b);
                if ((q->state & Qmsg) == 0) {
                        n -= len;
                        b = allocb(n);
@@ -1066,17 +1469,12 @@ long qread(struct queue *q, void *vp, int len)
 
        spin_lock_irqsave(&q->lock);
 again:
-       switch (qwait(q)) {
-               case 0:
-                       /* queue closed */
-                       spin_unlock_irqsave(&q->lock);
-                       qunlock(&q->rlock);
-                       poperror();
-                       return 0;
-               case -1:
-                       /* multiple reads on a closed queue */
-                       spin_unlock_irqsave(&q->lock);
-                       error(q->err);
+       if (!qwait(q)) {
+               /* queue closed */
+               spin_unlock_irqsave(&q->lock);
+               qunlock(&q->rlock);
+               poperror();
+               return 0;
        }
 
        /* if we get here, there's at least one block in the queue */
@@ -1143,7 +1541,7 @@ static int qnotfull(void *a)
        return q->len < q->limit || (q->state & Qclosed);
 }
 
-uint32_t noblockcnt;
+uint32_t dropcnt;
 
 /*
  *  add a block to a queue obeying flow control
@@ -1180,14 +1578,21 @@ long qbwrite(struct queue *q, struct block *b)
 
        /* if nonblocking, don't queue over the limit */
        if (q->len >= q->limit) {
-               if (q->noblock) {
+               /* drop overflow takes priority over regular non-blocking */
+               if (q->state & Qdropoverflow) {
                        spin_unlock_irqsave(&q->lock);
                        freeb(b);
-                       noblockcnt += n;
+                       dropcnt += n;
                        qunlock(&q->wlock);
                        poperror();
                        return n;
                }
+               if (q->state & Qnonblock) {
+                       spin_unlock_irqsave(&q->lock);
+                       freeb(b);
+                       set_errno(EAGAIN);
+                       error("queue full");
+               }
        }
 
        /* queue the block */
@@ -1231,7 +1636,7 @@ long qbwrite(struct queue *q, struct block *b)
         *  queue infinite crud.
         */
        for (;;) {
-               if (q->noblock || qnotfull(q))
+               if ((q->state & (Qdropoverflow | Qnonblock)) || qnotfull(q))
                        break;
 
                spin_lock_irqsave(&q->lock);
@@ -1245,15 +1650,50 @@ long qbwrite(struct queue *q, struct block *b)
        return n;
 }
 
+long qibwrite(struct queue *q, struct block *b)
+{
+       int n, dowakeup;
+
+       dowakeup = 0;
+
+       n = BLEN(b);
+
+       spin_lock_irqsave(&q->lock);
+
+       QDEBUG checkb(b, "qibwrite");
+       if (q->bfirst)
+               q->blast->next = b;
+       else
+               q->bfirst = b;
+       q->blast = b;
+       q->len += BALLOC(b);
+       q->dlen += n;
+
+       if (q->state & Qstarve) {
+               q->state &= ~Qstarve;
+               dowakeup = 1;
+       }
+
+       spin_unlock_irqsave(&q->lock);
+
+       if (dowakeup) {
+               if (q->kick)
+                       q->kick(q->arg);
+               rendez_wakeup(&q->rr);
+       }
+
+       return n;
+}
+
 /*
  *  write to a queue.  only Maxatomic bytes at a time is atomic.
  */
 int qwrite(struct queue *q, void *vp, int len)
 {
-       ERRSTACK(1);
        int n, sofar;
        struct block *b;
        uint8_t *p = vp;
+       void *ext_buf;
 
        QDEBUG if (!islo())
                 printd("qwrite hi %p\n", getcallerpc(&q));
@@ -1261,18 +1701,32 @@ int qwrite(struct queue *q, void *vp, int len)
        sofar = 0;
        do {
                n = len - sofar;
+               /* This is 64K, the max amount per single block.  Still a good value? */
                if (n > Maxatomic)
                        n = Maxatomic;
 
+               /* If n is small, we don't need to bother with the extra_data.  But
+                * until the whole stack can handle extd blocks, we'll use them
+                * unconditionally. */
+#ifdef CONFIG_BLOCK_EXTRAS
+               /* allocb builds in 128 bytes of header space to all blocks, but this is
+                * only available via padblock (to the left).  we also need some space
+                * for pullupblock for some basic headers (like icmp) that get written
+                * in directly */
+               b = allocb(64);
+               ext_buf = kmalloc(n, 0);
+               memcpy(ext_buf, p + sofar, n);
+               block_add_extd(b, 1, KMALLOC_WAIT); /* returns 0 on success; TODO: check it — on failure extra_data[0] is invalid and ext_buf leaks */
+               b->extra_data[0].base = (uintptr_t)ext_buf;
+               b->extra_data[0].off = 0;
+               b->extra_data[0].len = n;
+               b->extra_len += n;
+#else
                b = allocb(n);
-               if (waserror()) {
-                       freeb(b);
-                       nexterror();
-               }
                memmove(b->wp, p + sofar, n);
-               poperror();
                b->wp += n;
-
+#endif
+
                qbwrite(q, b);
 
                sofar += n;
@@ -1302,32 +1756,12 @@ int qiwrite(struct queue *q, void *vp, int len)
                b = iallocb(n);
                if (b == NULL)
                        break;
+               /* TODO consider extra_data */
                memmove(b->wp, p + sofar, n);
+               /* this adjusts BLEN to be n, or at least it should */
                b->wp += n;
-
-               spin_lock_irqsave(&q->lock);
-
-               QDEBUG checkb(b, "qiwrite");
-               if (q->bfirst)
-                       q->blast->next = b;
-               else
-                       q->bfirst = b;
-               q->blast = b;
-               q->len += BALLOC(b);
-               q->dlen += n;
-
-               if (q->state & Qstarve) {
-                       q->state &= ~Qstarve;
-                       dowakeup = 1;
-               }
-
-               spin_unlock_irqsave(&q->lock);
-
-               if (dowakeup) {
-                       if (q->kick)
-                               q->kick(q->arg);
-                       rendez_wakeup(&q->rr);
-               }
+               assert(n == BLEN(b));
+               qibwrite(q, b);
 
                sofar += n;
        } while (sofar < len && (q->state & Qmsg) == 0);
@@ -1359,13 +1793,12 @@ void qclose(struct queue *q)
        /* mark it */
        spin_lock_irqsave(&q->lock);
        q->state |= Qclosed;
-       q->state &= ~(Qflow | Qstarve);
+       q->state &= ~(Qflow | Qstarve | Qdropoverflow | Qnonblock);
        strncpy(q->err, Ehungup, sizeof(q->err));
        bfirst = q->bfirst;
        q->bfirst = 0;
        q->len = 0;
        q->dlen = 0;
-       q->noblock = 0;
        spin_unlock_irqsave(&q->lock);
 
        /* free queued blocks */
@@ -1413,7 +1846,7 @@ void qreopen(struct queue *q)
        q->state &= ~Qclosed;
        q->state |= Qstarve;
        q->eof = 0;
-       q->limit = q->iNULLim;
+       q->limit = q->inilim;
        spin_unlock_irqsave(&q->lock);
 }
 
@@ -1455,11 +1888,23 @@ void qsetlimit(struct queue *q, int limit)
 }
 
 /*
- *  set blocking/nonblocking
+ *  set whether writes drop overflowing blocks, or if we sleep
  */
-void qnoblock(struct queue *q, int onoff)
+void qdropoverflow(struct queue *q, bool onoff)
 {
-       q->noblock = onoff;
+       if (onoff)
+               q->state |= Qdropoverflow;
+       else
+               q->state &= ~Qdropoverflow;
+}
+
+/* set whether or not the queue is nonblocking, in the EAGAIN sense. */
+void qnonblock(struct queue *q, bool onoff)
+{
+       if (onoff)
+               q->state |= Qnonblock;
+       else
+               q->state &= ~Qnonblock;
 }
 
 /*