WIP-pop-3000
[akaros.git] / kern / src / net / tcp.c
index bc3ce94..0f9c4ac 100644 (file)
-/**
- * @file
- * Transmission Control Protocol for IP
+/* Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
+ * Portions Copyright © 1997-1999 Vita Nuova Limited
+ * Portions Copyright © 2000-2007 Vita Nuova Holdings Limited
+ *                                (www.vitanuova.com)
+ * Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
  *
- * This file contains common functions for the TCP implementation, such as functinos
- * for manipulating the data structures and the TCP timer functions. TCP functions
- * related to input and output is found in tcp_in.c and tcp_out.c respectively.
+ * Modified for the Akaros operating system:
+ * Copyright (c) 2013-2014 The Regents of the University of California
+ * Copyright (c) 2013-2017 Google Inc.
  *
- */
-
-/*
- * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
- * All rights reserved. 
- * 
- * Redistribution and use in source and binary forms, with or without modification, 
- * are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
  *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 
- * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT 
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 
- * OF SUCH DAMAGE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * This file is part of the lwIP TCP/IP stack.
- * 
- * Author: Adam Dunkels <adam@sics.se>
- * Modified by David Zhu <yuzhu@cs.berkeley.edu> to be used for Akaros
- *
- */
-
-#include <ros/common.h>
-#include <string.h>
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE. */
+
+#include <vfs.h>
+#include <kfs.h>
+#include <slab.h>
 #include <kmalloc.h>
-#include <net.h>
-#include <sys/queue.h>
-#include <atomic.h>
-
-#include <bits/netinet.h>
+#include <kref.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+#include <error.h>
+#include <cpio.h>
+#include <pmap.h>
+#include <smp.h>
 #include <net/ip.h>
 #include <net/tcp.h>
-#include <net/tcp_impl.h>
-#include <slab.h>
-#include <socket.h>
-#include <string.h>
-#include <debug.h>
-
-/* String array used to display different TCP states */
-const char * const tcp_state_str[] = {
-  "CLOSED",      
-  "LISTEN",      
-  "SYN_SENT",    
-  "SYN_RCVD",    
-  "ESTABLISHED", 
-  "FIN_WAIT_1",  
-  "FIN_WAIT_2",  
-  "CLOSE_WAIT",  
-  "CLOSING",     
-  "LAST_ACK",    
-  "TIME_WAIT"   
+
/* Printable names for the TCP states.  Order must correspond to the
 * state enumeration in tcp.h.  NOTE(review): there is no "Syn_received"
 * entry - passive opens appear to be parked in "limbo" (see limbo() /
 * limborexmit()) rather than getting a conv in that state; confirm
 * against the enum in tcp.h before adding states. */
static char *tcpstates[] = {
	"Closed", "Listen", "Syn_sent",
	"Established", "Finwait1", "Finwait2", "Close_wait",
	"Closing", "Last_ack", "Time_wait"
};
 
-const uint8_t tcp_backoff[13] =
-    { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7};
- /* Times per slowtmr hits */
-const uint8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 };
-
-struct tcp_pcb *tcp_pcbs;
-
-/** List of all TCP PCBs bound but not yet (connected || listening) */
-struct tcp_pcb *tcp_bound_pcbs;
-/** List of all TCP PCBs in LISTEN state */
-union tcp_listen_pcbs_t tcp_listen_pcbs;
-/** List of all TCP PCBs that are in a state in which
- * they accept or send data. */
-struct tcp_pcb *tcp_active_pcbs;
-/** List of all TCP PCBs in TIME-WAIT state */
-struct tcp_pcb *tcp_tw_pcbs;
-
-#define NUM_TCP_PCB_LISTS               4
-#define NUM_TCP_PCB_LISTS_NO_TIME_WAIT  3
-/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */
-struct tcp_pcb **tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs,
-  &tcp_active_pcbs, &tcp_tw_pcbs};
-
-/** Timer counter to handle calling slow-timer from tcp_tmr() */ 
-static uint8_t tcp_timer;
-static uint16_t tcp_new_port(void);
-
-/** Only used for temporary storage. */
-struct tcp_pcb *tcp_tmp_pcb;
-
-/* Incremented every coarse grained timer shot (typically every 500 ms). */
-uint32_t tcp_ticks;
-uint16_t tcp_port_num = SOCKET_PORT_START;
-
-static uint16_t tcp_new_port(void);
-/**
- * Abandons a connection and optionally sends a RST to the remote
- * host.  Deletes the local protocol control block. This is done when
- * a connection is killed because of shortage of memory.
- *
- * @param pcb the tcp_pcb to abort
- * @param reset boolean to indicate whether a reset should be sent
- */
-void
-tcp_abandon(struct tcp_pcb *pcb, int reset)
-{
-  uint32_t seqno, ackno;
-  uint16_t remote_port, local_port;
-  ip_addr_t remote_ip, local_ip;
-#if LWIP_CALLBACK_API  
-  tcp_err_fn errf;
-#endif /* LWIP_CALLBACK_API */
-  void *errf_arg;
-
-  /* pcb->state LISTEN not allowed here */
-  LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs",
-    pcb->state != LISTEN);
-  /* Figure out on which TCP PCB list we are, and remove us. If we
-     are in an active state, call the receive function associated with
-     the PCB with a NULL argument, and send an RST to the remote end. */
-  if (pcb->state == TIME_WAIT) {
-    tcp_pcb_remove(&tcp_tw_pcbs, pcb);
-               kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-  } else {
-    seqno = pcb->snd_nxt;
-    ackno = pcb->rcv_nxt;
-    ip_addr_copy(local_ip, pcb->local_ip);
-    ip_addr_copy(remote_ip, pcb->remote_ip);
-    local_port = pcb->local_port;
-    remote_port = pcb->remote_port;
-#if LWIP_CALLBACK_API
-    errf = pcb->errf;
-#endif /* LWIP_CALLBACK_API */
-    errf_arg = pcb->callback_arg;
-    tcp_pcb_remove(&tcp_active_pcbs, pcb);
-    if (pcb->unacked != NULL) {
-      tcp_segs_free(pcb->unacked);
-    }
-    if (pcb->unsent != NULL) {
-      tcp_segs_free(pcb->unsent);
-    }
-#if TCP_QUEUE_OOSEQ    
-    if (pcb->ooseq != NULL) {
-      tcp_segs_free(pcb->ooseq);
-    }
-#endif /* TCP_QUEUE_OOSEQ */
-               kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-    TCP_EVENT_ERR(errf, errf_arg, ECONNABORTED);
-    if (reset) {
-      LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
-      tcp_rst(seqno, ackno, &local_ip, &remote_ip, local_port, remote_port);
-    }
-  }
-}
-
-/**
- * Aborts the connection by sending a RST (reset) segment to the remote
- * host. The pcb is deallocated. This function never fails.
- *
- * ATTENTION: When calling this from one of the TCP callbacks, make
- * sure you always return ECONNABORTED (and never return ECONNABORTED otherwise
- * or you will risk accessing deallocated memory or memory leaks!
+static int tcp_irtt = DEF_RTT;                 /* Initial guess at round trip time */
+static uint16_t tcp_mss = DEF_MSS;             /* Maximum segment size to be sent */
+
/* Printable names for the TCP statistics counters, indexed by the stats
 * enumeration in tcp.h (MaxConn..OutOfOrder); used when rendering the
 * stats file.  Must stay in sync with that enumeration. */
static char *statnames[] = {
	[MaxConn] "MaxConn",
	[ActiveOpens] "ActiveOpens",
	[PassiveOpens] "PassiveOpens",
	[EstabResets] "EstabResets",
	[CurrEstab] "CurrEstab",
	[InSegs] "InSegs",
	[OutSegs] "OutSegs",
	[RetransSegs] "RetransSegs",
	[RetransTimeouts] "RetransTimeouts",
	[InErrs] "InErrs",
	[OutRsts] "OutRsts",
	[CsumErrs] "CsumErrs",
	[HlenErrs] "HlenErrs",
	[LenErrs] "LenErrs",
	[OutOfOrder] "OutOfOrder",
};
+
+/*
+ *  Setting tcpporthogdefense to non-zero enables Dong Lin's
+ *  solution to hijacked systems staking out ports as a form
+ *  of DoS attack.
 *
+ *  To avoid stateless Conv hogs, we pick a sequence number at random.  If
+ *  that number gets acked by the other end, we shut down the connection.
+ *  Look for tcpporthogdefense in the code.
  */
-void
-tcp_abort(struct tcp_pcb *pcb)
+static int tcpporthogdefense = 0;
+
+static int addreseq(Tcpctl *, struct tcppriv *, Tcp *, struct block *,
+                    uint16_t);
+static void getreseq(Tcpctl *, Tcp *, struct block **, uint16_t *);
+static void localclose(struct conv *, char *unused_char_p_t);
+static void procsyn(struct conv *, Tcp *);
+static void tcpiput(struct Proto *, struct Ipifc *, struct block *);
+static void tcpoutput(struct conv *);
+static int tcptrim(Tcpctl *, Tcp *, struct block **, uint16_t *);
+static void tcpstart(struct conv *, int);
+static void tcptimeout(void *);
+static void tcpsndsyn(struct conv *, Tcpctl *);
+static void tcprcvwin(struct conv *);
+static void tcpacktimer(void *);
+static void tcpkeepalive(void *);
+static void tcpsetkacounter(Tcpctl *);
+static void tcprxmit(struct conv *);
+static void tcpsettimer(Tcpctl *);
+static void tcpsynackrtt(struct conv *);
+static void tcpsetscale(struct conv *, Tcpctl *, uint16_t, uint16_t);
+static void tcp_loss_event(struct conv *s, Tcpctl *tcb);
+static uint16_t derive_payload_mss(Tcpctl *tcb);
+static void set_in_flight(Tcpctl *tcb);
+
+static void limborexmit(struct Proto *);
+static void limbo(struct conv *, uint8_t *unused_uint8_p_t, uint8_t *, Tcp *,
+                                 int);
+
+static void tcpsetstate(struct conv *s, uint8_t newstate)
 {
-  tcp_abandon(pcb, 1);
+       Tcpctl *tcb;
+       uint8_t oldstate;
+       struct tcppriv *tpriv;
+
+       tpriv = s->p->priv;
+
+       tcb = (Tcpctl *) s->ptcl;
+
+       oldstate = tcb->state;
+       if (oldstate == newstate)
+               return;
+
+       if (oldstate == Established)
+               tpriv->stats[CurrEstab]--;
+       if (newstate == Established)
+               tpriv->stats[CurrEstab]++;
+
+       /**
+       print( "%d/%d %s->%s CurrEstab=%d\n", s->lport, s->rport,
+               tcpstates[oldstate], tcpstates[newstate], tpriv->tstats.tcpCurrEstab );
+       **/
+
+       switch (newstate) {
+               case Closed:
+                       qclose(s->rq);
+                       qclose(s->wq);
+                       qclose(s->eq);
+                       break;
+
+               case Close_wait:        /* Remote closes */
+                       qhangup(s->rq, NULL);
+                       break;
+       }
+
+       tcb->state = newstate;
+
+       if (oldstate == Syn_sent && newstate != Closed)
+               Fsconnected(s, NULL);
 }
 
+static void tcpconnect(struct conv *c, char **argv, int argc)
+{
+       Fsstdconnect(c, argv, argc);
+       tcpstart(c, TCP_CONNECT);
+}
 
-/** 
- * Update the state that tracks the available window space to advertise.
- *
- * Returns how much extra window would be advertised if we sent an
- * update now.
- */
-uint32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb)
-{
-  uint32_t new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd;
-
-  if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + MIN((TCP_WND / 2), pcb->mss))) {
-    /* we can advertise more window */
-    pcb->rcv_ann_wnd = pcb->rcv_wnd;
-    return new_right_edge - pcb->rcv_ann_right_edge;
-  } else {
-    if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) {
-      /* Can happen due to other end sending out of advertised window,
-       * but within actual available (but not yet advertised) window */
-      pcb->rcv_ann_wnd = 0;
-    } else {
-      /* keep the right edge of window constant */
-      uint32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt;
-      pcb->rcv_ann_wnd = (uint16_t)new_rcv_ann_wnd;
-    }
-    return 0;
-  }
-}
-
-/**
- * Kills the oldest connection that is in TIME_WAIT state.
- * Called from tcp_alloc() if no more connections are available.
- */
-static void
-tcp_kill_timewait(void)
-{
-  struct tcp_pcb *pcb, *inactive;
-  uint32_t inactivity;
-
-  inactivity = 0;
-  inactive = NULL;
-  /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */
-  for(pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
-    if ((uint32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
-      inactivity = tcp_ticks - pcb->tmr;
-      inactive = pcb;
-    }
-  }
-  if (inactive != NULL) {
-    LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n",
-           (void *)inactive, inactivity));
-    tcp_abort(inactive);
-  }
-}
-
-/**
- * Kills the oldest active connection that has lower priority than prio.
- *
- * @param prio minimum priority
- */
-static void
-tcp_kill_prio(uint8_t prio)
-{
-  struct tcp_pcb *pcb, *inactive;
-  uint32_t inactivity;
-  uint8_t mprio;
-
-
-  mprio = TCP_PRIO_MAX;
-  
-  /* We kill the oldest active connection that has lower priority than prio. */
-  inactivity = 0;
-  inactive = NULL;
-  for(pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
-    if (pcb->prio <= prio &&
-       pcb->prio <= mprio &&
-       (uint32_t)(tcp_ticks - pcb->tmr) >= inactivity) {
-      inactivity = tcp_ticks - pcb->tmr;
-      inactive = pcb;
-      mprio = pcb->prio;
-    }
-  }
-  if (inactive != NULL) {
-    LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n",
-           (void *)inactive, inactivity));
-    tcp_abort(inactive);
-  }
-}
-/**
- * This function should be called by the application when it has
- * processed the data. The purpose is to advertise a larger window
- * when the data has been processed.
- *
- * @param pcb the tcp_pcb for which data is read
- * @param len the amount of bytes that have been read by the application
- */
-void
-tcp_recved(struct tcp_pcb *pcb, uint16_t len)
+static int tcpstate(struct conv *c, char *state, int n)
 {
-  int wnd_inflation;
+       Tcpctl *s;
+
+       s = (Tcpctl *) (c->ptcl);
+
+       return snprintf(state, n,
+                                       "%s qin %d qout %d srtt %d mdev %d cwin %u swin %u>>%d rwin %u>>%d timer.start %llu timer.count %llu rerecv %d katimer.start %d katimer.count %d\n",
+                                       tcpstates[s->state],
+                                       c->rq ? qlen(c->rq) : 0,
+                                       c->wq ? qlen(c->wq) : 0,
+                                       s->srtt, s->mdev,
+                                       s->cwind, s->snd.wnd, s->rcv.scale, s->rcv.wnd,
+                                       s->snd.scale, s->timer.start, s->timer.count, s->rerecv,
+                                       s->katimer.start, s->katimer.count);
+}
+
+static int tcpinuse(struct conv *c)
+{
+       Tcpctl *s;
 
-  check(len <= 0xffff - pcb->rcv_wnd);
+       s = (Tcpctl *) (c->ptcl);
+       return s->state != Closed;
+}
 
-  pcb->rcv_wnd += len;
-  if (pcb->rcv_wnd > TCP_WND) {
-    pcb->rcv_wnd = TCP_WND;
-  }
+static void tcpannounce(struct conv *c, char **argv, int argc)
+{
+       Fsstdannounce(c, argv, argc);
+       tcpstart(c, TCP_LISTEN);
+       Fsconnected(c, NULL);
+}
 
-  wnd_inflation = tcp_update_rcv_ann_wnd(pcb);
+static void tcpbypass(struct conv *cv, char **argv, int argc)
+{
+       struct tcppriv *tpriv = cv->p->priv;
 
-  /* If the change in the right edge of window is significant (default
-   * watermark is TCP_WND/4), then send an explicit update now.
-   * Otherwise wait for a packet to be sent in the normal course of
-   * events (or more window to be available later) */
-  if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) {
-    tcp_ack_now(pcb);
-    //XXX: tcp_output(pcb);
-  }
+       Fsstdbypass(cv, argv, argc);
+       iphtadd(&tpriv->ht, cv);
+}
 
-  printk("tcp_recved: received %d  bytes, wnd %d (%d).\n",
-         len, pcb->rcv_wnd, TCP_WND - pcb->rcv_wnd);
+static void tcpshutdown(struct conv *c, int how)
+{
+       Tcpctl *tcb = (Tcpctl*)c->ptcl;
+
+       /* Do nothing for the read side */
+       if (how == SHUT_RD)
+               return;
+       /* Sends a FIN.  If we're in another state (like Listen), we'll run into
+        * issues, since we'll never send the FIN.  We'll be shutdown on our end,
+        * but we'll never tell the distant end.  Might just be an app issue. */
+       switch (tcb->state) {
+       case Established:
+               tcb->flgcnt++;
+               tcpsetstate(c, Finwait1);
+               tcpoutput(c);
+               break;
+       }
 }
 
-/**
- * Default receive callback that is called if the user didn't register
- * a recv callback for the pcb.
+/*
+ *  tcpclose is always called with the q locked
  */
-error_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, error_t err) {
-       int8_t irq_state = 0;
-       if (pcb == NULL || pcb->pcbsock == NULL) {
-               pbuf_free(p);
-               return -1;
+static void tcpclose(struct conv *c)
+{
+       Tcpctl *tcb;
+
+       tcb = (Tcpctl *) c->ptcl;
+
+       qhangup(c->rq, NULL);
+       qhangup(c->wq, NULL);
+       qhangup(c->eq, NULL);
+       qflush(c->rq);
+
+       switch (tcb->state) {
+               case Listen:
+                       /*
+                        *  reset any incoming calls to this listener
+                        */
+                       Fsconnected(c, "Hangup");
+
+                       localclose(c, NULL);
+                       break;
+               case Closed:
+               case Syn_sent:
+                       localclose(c, NULL);
+                       break;
+               case Established:
+                       tcb->flgcnt++;
+                       tcpsetstate(c, Finwait1);
+                       tcpoutput(c);
+                       break;
+               case Close_wait:
+                       tcb->flgcnt++;
+                       tcpsetstate(c, Last_ack);
+                       tcpoutput(c);
+                       break;
+       }
+}
+
/* Write-queue kick: runs when data is queued on the conv's wq (see
 * tcpcreate).  For states that can carry data it refreshes the receive
 * window and pushes output; any other state means the conv is dead or
 * dying, so it is closed locally.  Takes the conv qlock itself; the
 * waserror/nexterror pair ensures the lock is dropped if anything in
 * the output path throws. */
static void tcpkick(void *x)
{
	ERRSTACK(1);
	struct conv *s = x;
	Tcpctl *tcb;

	tcb = (Tcpctl *) s->ptcl;

	qlock(&s->qlock);
	if (waserror()) {
		qunlock(&s->qlock);
		nexterror();
	}

	switch (tcb->state) {
		case Syn_sent:
		case Established:
		case Close_wait:
			/*
			 * Push data
			 */
			tcprcvwin(s);
			tcpoutput(s);
			break;
		default:
			localclose(s, "Hangup");
			break;
	}

	qunlock(&s->qlock);
	poperror();
}
+
+static void tcprcvwin(struct conv *s)
+{
+       /* Call with tcb locked */
+       int w;
+       Tcpctl *tcb;
+
+       tcb = (Tcpctl *) s->ptcl;
+       w = tcb->window - qlen(s->rq);
+       if (w < 0)
+               w = 0;
+
+       /* RFC 813: Avoid SWS.  We'll always reduce the window (because the qio
+        * increased - that's legit), and we'll always advertise the window
+        * increases (corresponding to qio drains) when those are greater than MSS.
+        * But we don't advertise increases less than MSS.
+        *
+        * Note we don't shrink the window at all - that'll result in tcptrim()
+        * dropping packets that were sent before the sender gets our update. */
+       if ((w < tcb->rcv.wnd) || (w >= tcb->mss))
+               tcb->rcv.wnd = w;
+       /* We've delayed sending an update to rcv.wnd, and we might never get
+        * another ACK to drive the TCP stack after the qio is drained.  We could
+        * replace this stuff with qio kicks or callbacks, but that might be
+        * trickier with the MSS limitation.  (and 'edge' isn't empty or not). */
+       if (w < tcb->mss)
+               tcb->rcv.blocked = 1;
+}
+
/* Delayed-ACK timer handler: when the timer fires, push out an ACK (and
 * any pending data / window update) unless the conv has fully closed.
 * Runs from the timer pump (tcpackproc), so it must take the conv qlock
 * itself; waserror/nexterror guarantees the unlock on a throw. */
static void tcpacktimer(void *v)
{
	ERRSTACK(1);
	Tcpctl *tcb;
	struct conv *s;

	s = v;
	tcb = (Tcpctl *) s->ptcl;

	qlock(&s->qlock);
	if (waserror()) {
		qunlock(&s->qlock);
		nexterror();
	}
	if (tcb->state != Closed) {
		/* FORCE presumably makes the output path emit a segment even
		 * with nothing queued - semantics live in tcpoutput (not
		 * visible here); confirm. */
		tcb->flags |= FORCE;
		tcprcvwin(s);
		tcpoutput(s);
	}
	qunlock(&s->qlock);
	poperror();
}
+
+static void tcpcreate(struct conv *c)
+{
+       /* We don't use qio limits.  Instead, TCP manages flow control on its own.
+        * We only use qpassnolim().  Note for qio that 0 doesn't mean no limit. */
+       c->rq = qopen(0, Qcoalesce, 0, 0);
+       c->wq = qopen(8 * QMAX, Qkick, tcpkick, c);
+}
+
+static void timerstate(struct tcppriv *priv, Tcptimer *t, int newstate)
+{
+       if (newstate != TcptimerON) {
+               if (t->state == TcptimerON) {
+                       // unchain
+                       if (priv->timers == t) {
+                               priv->timers = t->next;
+                               if (t->prev != NULL)
+                                       panic("timerstate1");
                        }
-                       spin_unlock(&sock->waiter_lock);
+                       if (t->next)
+                               t->next->prev = t->prev;
+                       if (t->prev)
+                               t->prev->next = t->next;
+                       t->next = t->prev = NULL;
+               }
+       } else {
+               if (t->state != TcptimerON) {
+                       // chain
+                       if (t->prev != NULL || t->next != NULL)
+                               panic("timerstate2");
+                       t->prev = NULL;
+                       t->next = priv->timers;
+                       if (t->next)
+                               t->next->prev = t;
+                       priv->timers = t;
                }
        }
-       printk ("received total length tcp %d\n", p->tot_len);
-       tcp_recved(pcb, p->tot_len);
-       // decref
-       pbuf_free(p);
-  return ESUCCESS;
+       t->state = newstate;
 }
 
/* Protocol-wide timer pump, run as its own kthread.  Wakes every
 * MSPTICK ms and decrements every TcptimerON timer under priv->tl, so
 * timer counts are in units of MSPTICK (the list is unsorted; expected
 * error on the first tick is ~MSPTICK/2).  Expired timers are flipped
 * to TcptimerDONE and collected on a local 'timeo' list via readynext,
 * then their handlers run AFTER priv->tl is dropped, each in a
 * discard-error waserror frame.  Finally limborexmit() retransmits for
 * half-open (limbo) connections.
 *
 * NOTE(review): timer.state is peeked without the lock in the second
 * loop, and nothing obviously stops a concurrent tcpgo() from re-arming
 * a DONE timer before its func runs - the locking here deserves
 * scrutiny.  A kthread timer chain might serve better than this poll. */
static void tcpackproc(void *a)
{
	ERRSTACK(1);
	Tcptimer *t, *tp, *timeo;
	struct Proto *tcp;
	struct tcppriv *priv;
	int loop;

	tcp = a;
	priv = tcp->priv;

	for (;;) {
		kthread_usleep(MSPTICK * 1000);

		qlock(&priv->tl);
		timeo = NULL;
		loop = 0;
		for (t = priv->timers; t != NULL; t = tp) {
			if (loop++ > 10000)
				panic("tcpackproc1");
			/* save next now: timerstate() unchains t on expiry */
			tp = t->next;
			if (t->state == TcptimerON) {
				t->count--;
				if (t->count == 0) {
					timerstate(priv, t, TcptimerDONE);
					t->readynext = timeo;
					timeo = t;
				}
			}
		}
		qunlock(&priv->tl);

		loop = 0;
		for (t = timeo; t != NULL; t = t->readynext) {
			if (loop++ > 10000)
				panic("tcpackproc2");
			if (t->state == TcptimerDONE && t->func != NULL) {
				/* discard error style */
				if (!waserror())
					(*t->func) (t->arg);
				poperror();
			}
		}

		limborexmit(tcp);
	}
}
-/**
- * Calculates a new initial sequence number for new connections.
- * TODO: Consider use a secure pseduo ISN
- *
- * @return uint32_t pseudo random sequence number
- */
-uint32_t tcp_next_iss(void)
+static void tcpgo(struct tcppriv *priv, Tcptimer *t)
 {
-  static uint32_t iss = 6510;
-  
-  iss += tcp_ticks;       /* XXX */
-  return iss;
+       if (t == NULL || t->start == 0)
+               return;
+
+       qlock(&priv->tl);
+       t->count = t->start;
+       timerstate(priv, t, TcptimerON);
+       qunlock(&priv->tl);
 }
 
-/**
- * Allocate a new tcp_pcb structure.
- *
- * @param prio priority for the new pcb
- * @return a new tcp_pcb that initially is in state CLOSED
- */
-struct tcp_pcb* tcp_alloc(uint8_t prio) {
-  struct tcp_pcb *pcb;
-  uint32_t iss;
-  pcb = kmem_cache_alloc(tcp_pcb_kcache, 0);
-  if (pcb == NULL) {
-               /* Try killing oldest connection in TIME-WAIT. */
-               printd("tcp_alloc: killing off oldest TIME-WAIT connection\n");
-               tcp_kill_timewait();
-               /* Try to allocate a tcp_pcb again. */
-               pcb = (struct tcp_pcb *)kmem_cache_alloc(tcp_pcb_kcache, 0);
-               if (pcb == NULL) {
-                       /* Try killing active connections with lower priority than the new one. */
-                       printd("tcp_alloc: killing connection with prio lower than %d\n", prio);
-                       tcp_kill_prio(prio);
-                       /* Try to allocate a tcp_pcb again. */
-                       pcb = (struct tcp_pcb *)kmem_cache_alloc(tcp_pcb_kcache, 0);
-               }
-       }
-  if (pcb != NULL) {
-    memset(pcb, 0, sizeof(struct tcp_pcb));
-    pcb->prio = prio;
-    pcb->snd_buf = TCP_SND_BUF;
-    pcb->snd_queuelen = 0;
-    pcb->rcv_wnd = TCP_WND;
-    pcb->rcv_ann_wnd = TCP_WND;
-    pcb->tos = 0;
-    pcb->ttl = TCP_TTL;
-    /* As initial send MSS, we use TCP_MSS but limit it to 536.
-       The send MSS is updated when an MSS option is received. */
-    pcb->mss = (TCP_MSS > 536) ? 536 : TCP_MSS;
-    pcb->rto = 3000 / TCP_SLOW_INTERVAL;
-    pcb->sa = 0;
-    pcb->sv = 3000 / TCP_SLOW_INTERVAL;
-    pcb->rtime = -1;
-    pcb->cwnd = 1;
-    iss = tcp_next_iss();
-    pcb->snd_wl2 = iss;
-    pcb->snd_nxt = iss;
-    pcb->lastack = iss;
-    pcb->snd_lbb = iss;   
-    pcb->tmr = tcp_ticks;
-
-    pcb->polltmr = 0;
-
-/* Basically we need to use the callback api because then we can switch
- * handlers based on the state that the pcb is in. 
- */
+static void tcphalt(struct tcppriv *priv, Tcptimer *t)
+{
+       if (t == NULL)
+               return;
 
-    pcb->recv = tcp_recv_null;
-    
-    /* Init KEEPALIVE timer */
-    pcb->keep_idle  = TCP_KEEPIDLE_DEFAULT;
-    
-#if LWIP_TCP_KEEPALIVE
-    pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT;
-    pcb->keep_cnt   = TCP_KEEPCNT_DEFAULT;
-#endif /* LWIP_TCP_KEEPALIVE */
+       qlock(&priv->tl);
+       timerstate(priv, t, TcptimerOFF);
+       qunlock(&priv->tl);
+}
 
-    pcb->keep_cnt_sent = 0;
-  }
-  return pcb;
/* Exponential backoff multiplier for retransmit timers: returns 2^n.
 *
 * The shift is clamped to [0, 30] because `1 << n` is undefined
 * behavior in C for a negative n or for n >= the width of int (and
 * 1 << 31 overflows a signed int).  Behavior for the normal small
 * retransmit counts is unchanged; pathological counts now saturate at
 * 2^30 instead of invoking UB. */
static int backoff(int n)
{
	if (n < 0)
		n = 0;
	else if (n > 30)
		n = 30;
	return 1 << n;
}
 
-/**
- * A nastly hack featuring 'goto' statements that allocates a
- * new TCP local port.
- *
- * @return a new (free) local TCP port number
- */
-static uint16_t tcp_new_port(void) {
-  int i;
-  struct tcp_pcb *pcb;
-  static uint16_t port = TCP_LOCAL_PORT_RANGE_START;
-  
- again:
-  if (++port > TCP_LOCAL_PORT_RANGE_END) {
-    port = TCP_LOCAL_PORT_RANGE_START;
-  }
-  /* Check all PCB lists. */
-  for (i = 0; i < NUM_TCP_PCB_LISTS; i++) {  
-    for(pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) {
-      if (pcb->local_port == port) {
-        goto again;
-      }
-    }
-  }
-  return port;
-}
-
-
-/**
- * Binds the connection to a local portnumber and IP address. If the
- * IP address is not given (i.e., ipaddr == NULL), the IP address of
- * the outgoing network interface is used instead.
- *
- * @param pcb the tcp_pcb to bind (no check is done whether this pcb is
- *        already bound!)
- * @param ipaddr the local ip address to bind to (use IP_ADDR_ANY to bind
- *        to any local address
- * @param port the local port to bind to
- * @return ERR_USE if the port is already in use
- *         ESUCCESS if bound
- */
-error_t tcp_bind(struct tcp_pcb *pcb, const struct in_addr *ipaddr, uint16_t port) {
-  int i;
-  int max_pcb_list = NUM_TCP_PCB_LISTS;
-  struct tcp_pcb *cpcb;
-
-  LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return -EISCONN);
-
-#if SO_REUSE
-  /* Unless the REUSEADDR flag is set,
-     we have to check the pcbs in TIME-WAIT state, also.
-     We do not dump TIME_WAIT pcb's; they can still be matched by incoming
-     packets using both local and remote IP addresses and ports to distinguish.
-   */
-  if ((pcb->so_options & SO_REUSEADDR) != 0) {
-    max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT;
-  }
-#endif /* SO_REUSE */
-
-  if (port == 0) {
-    port = tcp_new_port();
-  }
-
-  /* Check if the address already is in use (on all lists) */
-  for (i = 0; i < max_pcb_list; i++) {
-    for(cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
-      if (cpcb->local_port == port) {
-#if SO_REUSE
-        /* Omit checking for the same port if both pcbs have REUSEADDR set.
-           For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in
-           tcp_connect. */
-        if (((pcb->so_options & SO_REUSEADDR) == 0) ||
-          ((cpcb->so_options & SO_REUSEADDR) == 0))
-#endif /* SO_REUSE */
-        {
-          if (ip_addr_isany(&(cpcb->local_ip)) ||
-              ip_addr_isany(ipaddr) ||
-              ip_addr_cmp(&(cpcb->local_ip), ipaddr)) {
-            return EADDRINUSE;
-          }
-        }
-      }
-    }
-  }
-
-  if (!ip_addr_isany(ipaddr)) {
-    pcb->local_ip = *ipaddr;
-  }
-  pcb->local_port = port;
-  TCP_REG(&tcp_bound_pcbs, pcb);
-  LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port));
-  return 0;
-}
-
-/**
- * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously
- * "refused" by upper layer (application) and sends delayed ACKs.
- *
- * Automatically called from tcp_tmr().
- */
-void tcp_fasttmr(void) {
-  struct tcp_pcb *pcb = tcp_active_pcbs;
-
-  while(pcb != NULL) {
-    struct tcp_pcb *next = pcb->next;
-    /* If there is data which was previously "refused" by upper layer */
-    if (pcb->refused_data != NULL) {
-      /* Notify again application with data previously received. */
-      error_t err;
-      LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_fasttmr: notify kept packet\n"));
-      TCP_EVENT_RECV(pcb, pcb->refused_data, ESUCCESS, err);
-      if (err == ESUCCESS) {
-        pcb->refused_data = NULL;
-      } else if (err == ECONNABORTED) {
-        /* if err == ECONNABORTED, 'pcb' is already deallocated */
-        pcb = NULL;
-      }
-    }
-
-    /* send delayed ACKs */
-    if (pcb && (pcb->flags & TF_ACK_DELAY)) {
-      printd("tcp_fasttmr: delayed ACK\n");
-      tcp_ack_now(pcb);
-      // XXX: tcp_output(pcb);
-      pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
-    }
-
-    pcb = next;
-  }
-}
-
-/**
- * Called periodically to dispatch TCP timers.
- *
- */
-void tcp_tmr(void) {
-       /* Call tcp_fasttmr() every 250 ms */
-  tcp_fasttmr();
+static void localclose(struct conv *s, char *reason)
+{
+       /* called with tcb locked */
+       Tcpctl *tcb;
+       Reseq *rp, *rp1;
+       struct tcppriv *tpriv;
+
+       tpriv = s->p->priv;
+       tcb = (Tcpctl *) s->ptcl;
+
+       iphtrem(&tpriv->ht, s);
+
+       tcphalt(tpriv, &tcb->timer);
+       tcphalt(tpriv, &tcb->rtt_timer);
+       tcphalt(tpriv, &tcb->acktimer);
+       tcphalt(tpriv, &tcb->katimer);
+
+       /* Flush reassembly queue; nothing more can arrive */
+       for (rp = tcb->reseq; rp != NULL; rp = rp1) {
+               rp1 = rp->next;
+               freeblist(rp->bp);
+               kfree(rp);
+       }
+       tcb->reseq = NULL;
+
+       if (tcb->state == Syn_sent)
+               Fsconnected(s, reason);
 
-  if (++tcp_timer & 1) {
-    /* Call tcp_tmr() every 500 ms, i.e., every other timer
-       tcp_tmr() is called. */
-    tcp_slowtmr();
-  }
+       qhangup(s->rq, reason);
+       qhangup(s->wq, reason);
+
+       tcpsetstate(s, Closed);
+
+       /* listener will check the rq state */
+       if (s->state == Announced)
+               rendez_wakeup(&s->listenr);
 }
 
-/**
- * Closes the TX side of a connection held by the PCB.
- * For tcp_close(), a RST is sent if the application didn't receive all data
- * (tcp_recved() not called for all data passed to recv callback).
- *
- * Listening pcbs are freed and may not be referenced any more.
- * Connection pcbs are freed if not yet connected and may not be referenced
- * any more. If a connection is established (at least SYN received or in
- * a closing state), the connection is closed, and put in a closing state.
- * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
- * unsafe to reference it.
- *
- * @param pcb the tcp_pcb to close
- * @return ESUCCESS if connection has been closed
- *         another error_t if closing failed and pcb is not freed
- */
-static error_t
-tcp_close_shutdown(struct tcp_pcb *pcb, uint8_t rst_on_unacked_data)
-{
-  error_t err;
-
-  if (rst_on_unacked_data && (pcb->state != LISTEN)) {
-    if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND)) {
-      /* Not all data received by application, send RST to tell the remote
-         side about this. */
-      LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED);
-
-      /* don't call tcp_abort here: we must not deallocate the pcb since
-         that might not be expected when calling tcp_close */
-      tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
-        pcb->local_port, pcb->remote_port);
-
-      tcp_pcb_purge(pcb);
-
-      /* TODO: to which state do we move now? */
-
-      /* move to TIME_WAIT since we close actively */
-      TCP_RMV(&tcp_active_pcbs, pcb);
-      pcb->state = TIME_WAIT;
-      TCP_REG(&tcp_tw_pcbs, pcb);
-
-      return ESUCCESS;
-    }
-  }
-
-  switch (pcb->state) {
-  case CLOSED:
-    /* Closing a pcb in the CLOSED state might seem erroneous,
-     * however, it is in this state once allocated and as yet unused
-     * and the user needs some way to free it should the need arise.
-     * Calling tcp_close() with a pcb that has already been closed, (i.e. twice)
-     * or for a pcb that has been used and then entered the CLOSED state 
-     * is erroneous, but this should never happen as the pcb has in those cases
-     * been freed, and so any remaining handles are bogus. */
-    err = ESUCCESS;
-    TCP_RMV(&tcp_bound_pcbs, pcb);
-               kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-    pcb = NULL;
-    break;
-  case LISTEN:
-    err = ESUCCESS;
-    tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb);
-               kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-    pcb = NULL;
-    break;
-  case SYN_SENT:
-    err = ESUCCESS;
-    tcp_pcb_remove(&tcp_active_pcbs, pcb);
-               kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-    pcb = NULL;
-    break;
-  case SYN_RCVD:
-    err = tcp_send_fin(pcb);
-    if (err == ESUCCESS) {
-      pcb->state = FIN_WAIT_1;
-    }
-    break;
-  case ESTABLISHED:
-    err = tcp_send_fin(pcb);
-    if (err == ESUCCESS) {
-      pcb->state = FIN_WAIT_1;
-    }
-    break;
-  case CLOSE_WAIT:
-    err = tcp_send_fin(pcb);
-    if (err == ESUCCESS) {
-      pcb->state = LAST_ACK;
-    }
-    break;
-  default:
-    /* Has already been closed, do nothing. */
-    err = ESUCCESS;
-    pcb = NULL;
-    break;
-  }
-
-  if (pcb != NULL && err == ESUCCESS) {
-    /* To ensure all data has been sent when tcp_close returns, we have
-       to make sure tcp_output doesn't fail.
-       Since we don't really have to ensure all data has been sent when tcp_close
-       returns (unsent data is sent from tcp timer functions, also), we don't care
-       for the return value of tcp_output for now. */
-    /* @todo: When implementing SO_LINGER, this must be changed somehow:
-       If SOF_LINGER is set, the data should be sent and acked before close returns.
-       This can only be valid for sequential APIs, not for the raw API. */
-    tcp_output(pcb);
-  }
-  return err;
-}
-
-/**
- * Closes the connection held by the PCB.
- *
- * Listening pcbs are freed and may not be referenced any more.
- * Connection pcbs are freed if not yet connected and may not be referenced
- * any more. If a connection is established (at least SYN received or in
- * a closing state), the connection is closed, and put in a closing state.
- * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
- * unsafe to reference it (unless an error is returned).
- *
- * @param pcb the tcp_pcb to close
- * @return ESUCCESS if connection has been closed
- *         another error_t if closing failed and pcb is not freed
- */
-error_t
-tcp_close(struct tcp_pcb *pcb)
+/* MTU of the first hop, minus the TCP and IP header lengths */
+static int tcpmtu(struct Ipifc *ifc, int version, int *scale)
 {
-#if TCP_DEBUG
-  LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
-  tcp_debug_print_state(pcb->state);
-#endif /* TCP_DEBUG */
+       int mtu;
+
+       switch (version) {
+               default:
+               case V4:
+                       mtu = DEF_MSS;
+                       if (ifc != NULL)
+                               mtu = ifc->maxtu - ifc->m->hsize - (TCP4_PKT + TCP4_HDRSIZE);
+                       break;
+               case V6:
+                       mtu = DEF_MSS6;
+                       if (ifc != NULL)
+                               mtu = ifc->maxtu - ifc->m->hsize - (TCP6_PKT + TCP6_HDRSIZE);
+                       break;
+       }
+       *scale = HaveWS | 7;
 
-  if (pcb->state != LISTEN) {
-    /* Set a flag not to receive any more data... */
-    pcb->flags |= TF_RXCLOSED;
-  }
-  /* ... and close */
-  return tcp_close_shutdown(pcb, 1);
+       return mtu;
 }
 
-/**
- * Causes all or part of a full-duplex connection of this PCB to be shut down.
- * This doesn't deallocate the PCB!
- *
- * @param pcb PCB to shutdown
- * @param shut_rx shut down receive side if this is != 0
- * @param shut_tx shut down send side if this is != 0
- * @return ESUCCESS if shutdown succeeded (or the PCB has already been shut down)
- *         another error_t on error.
- */
-error_t
-tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
-{
-  if (pcb->state == LISTEN) {
-    return ENOTCONN;
-  }
-  if (shut_rx) {
-    /* shut down the receive side: free buffered data... */
-    if (pcb->refused_data != NULL) {
-      pbuf_free(pcb->refused_data);
-      pcb->refused_data = NULL;
-    }
-    /* ... and set a flag not to receive any more data */
-    pcb->flags |= TF_RXCLOSED;
-  }
-  if (shut_tx) {
-    /* This can't happen twice since if it succeeds, the pcb's state is changed.
-       Only close in these states as the others directly deallocate the PCB */
-    switch (pcb->state) {
-  case SYN_RCVD:
-  case ESTABLISHED:
-  case CLOSE_WAIT:
-    return tcp_close_shutdown(pcb, 0);
-  default:
-    /* don't shut down other states */
-    break;
-    }
-  }
-  /* @todo: return another error_t if not in correct state or already shut? */
-  return ESUCCESS;
-}
-
-/**
- * Default accept callback if no accept callback is specified by the user.
- */
-static error_t
-tcp_accept_null(void *arg, struct tcp_pcb *pcb, error_t err)
+static void tcb_check_tso(Tcpctl *tcb)
 {
-       //XXX: IMPLEMENT ACCEPT
+       /* This can happen if the netdev isn't up yet. */
+       if (!tcb->ifc)
+               return;
+       if (tcb->ifc->feat & NETF_TSO)
+               tcb->flags |= TSO;
+       else
+               tcb->flags &= ~TSO;
+}
+
+static void inittcpctl(struct conv *s, int mode)
+{
+       Tcpctl *tcb;
+       Tcp4hdr *h4;
+       Tcp6hdr *h6;
+       int mss;
+
+       tcb = (Tcpctl *) s->ptcl;
+
+       memset(tcb, 0, sizeof(Tcpctl));
+
+       tcb->ssthresh = UINT32_MAX;
+       tcb->srtt = tcp_irtt;
+       tcb->mdev = 0;
+
+       /* setup timers */
+       tcb->timer.start = tcp_irtt / MSPTICK;
+       tcb->timer.func = tcptimeout;
+       tcb->timer.arg = s;
+       tcb->rtt_timer.start = MAX_TIME;
+       tcb->acktimer.start = TCP_ACK / MSPTICK;
+       tcb->acktimer.func = tcpacktimer;
+       tcb->acktimer.arg = s;
+       tcb->katimer.start = DEF_KAT / MSPTICK;
+       tcb->katimer.func = tcpkeepalive;
+       tcb->katimer.arg = s;
+
+       mss = DEF_MSS;
+
+	/* create a prototype (pseudo) header */
+       if (mode != TCP_LISTEN) {
+               if (ipcmp(s->laddr, IPnoaddr) == 0)
+                       findlocalip(s->p->f, s->laddr, s->raddr);
+
+               switch (s->ipversion) {
+                       case V4:
+                               h4 = &tcb->protohdr.tcp4hdr;
+                               memset(h4, 0, sizeof(*h4));
+                               h4->proto = IP_TCPPROTO;
+                               hnputs(h4->tcpsport, s->lport);
+                               hnputs(h4->tcpdport, s->rport);
+                               v6tov4(h4->tcpsrc, s->laddr);
+                               v6tov4(h4->tcpdst, s->raddr);
+                               break;
+                       case V6:
+                               h6 = &tcb->protohdr.tcp6hdr;
+                               memset(h6, 0, sizeof(*h6));
+                               h6->proto = IP_TCPPROTO;
+                               hnputs(h6->tcpsport, s->lport);
+                               hnputs(h6->tcpdport, s->rport);
+                               ipmove(h6->tcpsrc, s->laddr);
+                               ipmove(h6->tcpdst, s->raddr);
+                               mss = DEF_MSS6;
+                               break;
+                       default:
+                               panic("inittcpctl: version %d", s->ipversion);
+               }
+       }
 
-  return ECONNABORTED;
+       tcb->ifc = findipifc(s->p->f, s->laddr, 0);
+       tcb->mss = mss;
+       tcb->typical_mss = mss;
+       tcb->cwind = tcb->typical_mss * CWIND_SCALE;
+
+       /* default is no window scaling */
+       tcb->window = QMAX;
+       tcb->rcv.wnd = QMAX;
+       tcb->rcv.scale = 0;
+       tcb->snd.scale = 0;
+       tcb_check_tso(tcb);
 }
 
-/**
- * Set the state of the connection to be LISTEN, which means that it
- * is able to accept incoming connections. The protocol control block
- * is reallocated in order to consume less memory. Setting the
- * connection to LISTEN is an irreversible process.
- *
- * @param pcb the original tcp_pcb
- * @param backlog the incoming connections queue limit
- * @return tcp_pcb used for listening, consumes less memory.
- *
- * @note The original tcp_pcb is freed. This function therefore has to be
- *       called like this:
- *             tpcb = tcp_listen(tpcb);
- */
-struct tcp_pcb *
-tcp_listen_with_backlog(struct tcp_pcb *pcb, uint8_t backlog)
-{
-  struct tcp_pcb_listen *lpcb;
-
-  LWIP_ERROR("tcp_listen: pcb already connected", pcb->state == CLOSED, return NULL);
-
-  /* already listening? */
-  if (pcb->state == LISTEN) {
-    return pcb;
-  }
-#if SO_REUSE
-  if ((pcb->so_options & SO_REUSEADDR) != 0) {
-    /* Since SO_REUSEADDR allows reusing a local address before the pcb's usage
-       is declared (listen-/connection-pcb), we have to make sure now that
-       this port is only used once for every local IP. */
-    for(lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
-      if (lpcb->local_port == pcb->local_port) {
-        if (ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) {
-          /* this address/port is already used */
-          return NULL;
-        }
-      }
-    }
-  }
-#endif /* SO_REUSE */
-       lpcb = kmem_cache_alloc(tcp_pcb_listen_kcache, 0);
-  if (lpcb == NULL) {
-    return NULL;
-  }
-  lpcb->callback_arg = pcb->callback_arg;
-  lpcb->local_port = pcb->local_port;
-  lpcb->state = LISTEN;
-  lpcb->prio = pcb->prio;
-  lpcb->so_options = pcb->so_options;
-  lpcb->so_options |= SO_ACCEPTCONN;
-  lpcb->ttl = pcb->ttl;
-  lpcb->tos = pcb->tos;
-  ip_addr_copy(lpcb->local_ip, pcb->local_ip);
-  TCP_RMV(&tcp_bound_pcbs, pcb);
-       kmem_cache_free(tcp_pcb_kcache, (void*)pcb);
-#if LWIP_CALLBACK_API
-  lpcb->accept = tcp_accept_null;
-#endif /* LWIP_CALLBACK_API */
-#if TCP_LISTEN_BACKLOG
-  lpcb->accepts_pending = 0;
-  lpcb->backlog = (backlog ? backlog : 1);
-#endif /* TCP_LISTEN_BACKLOG */
-  TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb);
-  return (struct tcp_pcb *)lpcb;
-}
-
-
-/**
- * Connects to another host. The function given as the "connected"
- * argument will be called when the connection has been established.
- *
- * @param pcb the tcp_pcb used to establish the connection
- * @param ipaddr the remote ip address to connect to
- * @param port the remote tcp port to connect to
- * @param connected callback function to call when connected (or on error)
- * @return ERR_VAL if invalid arguments are given
- *         ESUCCESS if connect request has been sent
- *         other error_t values if connect request couldn't be sent
- */
-error_t
-tcp_connect(struct tcp_pcb *pcb, ip_addr_t *ipaddr, uint16_t port,
-      tcp_connected_fn connected)
-{
-  error_t ret;
-  uint32_t iss;
-
-  LWIP_ERROR("tcp_connect: can only connected from state CLOSED", pcb->state == CLOSED, return EISCONN);
-
-  LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port));
-  if (ipaddr != NULL) {
-    pcb->remote_ip = *ipaddr;
-  } else {
-    return ENETUNREACH;
-  }
-  pcb->remote_port = port;
-
-  /* check if we have a route to the remote host */
-  if (ip_addr_isany(&(pcb->local_ip))) {
-               // assume we have a route anywhere..
-
-    /* no local IP address set, yet. */
-    // struct netif *netif = ip_route(&(pcb->remote_ip));
-    /* Use the netif's IP address as local address. */
-               pcb->local_ip = LOCAL_IP_ADDR;
-  }
-
-  if (pcb->local_port == 0) {
-    pcb->local_port = tcp_new_port();
-  }
-#if SO_REUSE
-  if ((pcb->so_options & SO_REUSEADDR) != 0) {
-    /* Since SO_REUSEADDR allows reusing a local address, we have to make sure
-       now that the 5-tuple is unique. */
-    struct tcp_pcb *cpcb;
-    int i;
-    /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */
-    for (i = 2; i < NUM_TCP_PCB_LISTS; i++) {
-      for(cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) {
-        if ((cpcb->local_port == pcb->local_port) &&
-            (cpcb->remote_port == port) &&
-            ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) &&
-            ip_addr_cmp(&cpcb->remote_ip, ipaddr)) {
-          /* linux returns EISCONN here, but ERR_USE should be OK for us */
-          return ERR_USE;
-        }
-      }
-    }
-  }
-#endif /* SO_REUSE */
-  iss = tcp_next_iss();
-  pcb->rcv_nxt = 0;
-  pcb->snd_nxt = iss;
-  pcb->lastack = iss - 1;
-  pcb->snd_lbb = iss - 1;
-  pcb->rcv_wnd = TCP_WND;
-  pcb->rcv_ann_wnd = TCP_WND;
-  pcb->rcv_ann_right_edge = pcb->rcv_nxt;
-  pcb->snd_wnd = TCP_WND;
-  /* As initial send MSS, we use TCP_MSS but limit it to 536.
-     The send MSS is updated when an MSS option is received. */
-  pcb->mss = (TCP_MSS > 536) ? 536 : TCP_MSS;
-#if TCP_CALCULATE_EFF_SEND_MSS 
-  pcb->mss = tcp_eff_send_mss(pcb->mss, ipaddr);
-#endif /* TCP_CALCULATE_EFF_SEND_MSS */
-  pcb->cwnd = 1;
-  pcb->ssthresh = pcb->mss * 10;
-#if LWIP_CALLBACK_API
-  pcb->connected = connected;
-#else /* LWIP_CALLBACK_API */  
-#endif /* LWIP_CALLBACK_API */
-
-  /* Send a SYN together with the MSS option. */
-  ret = tcp_enqueue_flags(pcb, TCP_SYN);
-  if (ret == ESUCCESS) {
-    /* SYN segment was enqueued, changed the pcbs state now */
-    pcb->state = SYN_SENT;
-    TCP_RMV(&tcp_bound_pcbs, pcb);
-    TCP_REG(&tcp_active_pcbs, pcb);
-    //snmp_inc_tcpactiveopens();
-
-    tcp_output(pcb);
-  }
-  return ret;
-}
-
-/**
- * Called every 500 ms and implements the retransmission timer and the timer that
- * removes PCBs that have been in TIME-WAIT for enough time. It also increments
- * various timers such as the inactivity timer in each PCB.
- *
- * Automatically called from tcp_tmr().
- */
-void
-tcp_slowtmr(void)
-{
-  struct tcp_pcb *pcb, *prev;
-  uint16_t eff_wnd;
-  uint8_t pcb_remove;      /* flag if a PCB should be removed */
-  uint8_t pcb_reset;       /* flag if a RST should be sent when removing */
-  error_t err;
-
-  err = ESUCCESS;
-
-  ++tcp_ticks;
-
-  /* Steps through all of the active PCBs. */
-  prev = NULL;
-  pcb = tcp_active_pcbs;
-  if (pcb == NULL) {
-    LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n"));
-  }
-  while (pcb != NULL) {
-    LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n"));
-    LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED);
-    LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN);
-    LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT);
-
-    pcb_remove = 0;
-    pcb_reset = 0;
-
-    if (pcb->state == SYN_SENT && pcb->nrtx == TCP_SYNMAXRTX) {
-      ++pcb_remove;
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n"));
-    }
-    else if (pcb->nrtx == TCP_MAXRTX) {
-      ++pcb_remove;
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n"));
-    } else {
-      if (pcb->persist_backoff > 0) {
-        /* If snd_wnd is zero, use persist timer to send 1 byte probes
-         * instead of using the standard retransmission mechanism. */
-        pcb->persist_cnt++;
-        if (pcb->persist_cnt >= tcp_persist_backoff[pcb->persist_backoff-1]) {
-          pcb->persist_cnt = 0;
-          if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) {
-            pcb->persist_backoff++;
-          }
-          tcp_zero_window_probe(pcb);
-        }
-      } else {
-        /* Increase the retransmission timer if it is running */
-        if(pcb->rtime >= 0)
-          ++pcb->rtime;
-
-        if (pcb->unacked != NULL && pcb->rtime >= pcb->rto) {
-          /* Time for a retransmission. */
-          LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F
-                                      " pcb->rto %"S16_F"\n",
-                                      pcb->rtime, pcb->rto));
-
-          /* Double retransmission time-out unless we are trying to
-           * connect to somebody (i.e., we are in SYN_SENT). */
-          if (pcb->state != SYN_SENT) {
-            pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[pcb->nrtx];
-          }
-
-          /* Reset the retransmission timer. */
-          pcb->rtime = 0;
-
-          /* Reduce congestion window and ssthresh. */
-          eff_wnd = MIN(pcb->cwnd, pcb->snd_wnd);
-          pcb->ssthresh = eff_wnd >> 1;
-          if (pcb->ssthresh < (pcb->mss << 1)) {
-            pcb->ssthresh = (pcb->mss << 1);
-          }
-          pcb->cwnd = pcb->mss;
-          LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"U16_F
-                                       " ssthresh %"U16_F"\n",
-                                       pcb->cwnd, pcb->ssthresh));
-          /* The following needs to be called AFTER cwnd is set to one
-             mss - STJ */
-          tcp_rexmit_rto(pcb);
-        }
-      }
-    }
-    /* Check if this PCB has stayed too long in FIN-WAIT-2 */
-    if (pcb->state == FIN_WAIT_2) {
-      if ((uint32_t)(tcp_ticks - pcb->tmr) >
-          TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) {
-        ++pcb_remove;
-        LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n"));
-      }
-    }
-
-    /* Check if KEEPALIVE should be sent */
-    if((pcb->so_options & SO_KEEPALIVE) &&
-       ((pcb->state == ESTABLISHED) ||
-        (pcb->state == CLOSE_WAIT))) {
-#if LWIP_TCP_KEEPALIVE
-      if((uint32_t)(tcp_ticks - pcb->tmr) >
-         (pcb->keep_idle + (pcb->keep_cnt*pcb->keep_intvl))
-         / TCP_SLOW_INTERVAL)
-#else      
-      if((uint32_t)(tcp_ticks - pcb->tmr) >
-         (pcb->keep_idle + TCP_MAXIDLE) / TCP_SLOW_INTERVAL)
-#endif /* LWIP_TCP_KEEPALIVE */
-      {
-        LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to %"U16_F".%"U16_F".%"U16_F".%"U16_F".\n",
-                                ip4_addr1_16(&pcb->remote_ip), ip4_addr2_16(&pcb->remote_ip),
-                                ip4_addr3_16(&pcb->remote_ip), ip4_addr4_16(&pcb->remote_ip)));
-        
-        ++pcb_remove;
-        ++pcb_reset;
-      }
-#if LWIP_TCP_KEEPALIVE
-      else if((uint32_t)(tcp_ticks - pcb->tmr) > 
-              (pcb->keep_idle + pcb->keep_cnt_sent * pcb->keep_intvl)
-              / TCP_SLOW_INTERVAL)
-#else
-      else if((uint32_t)(tcp_ticks - pcb->tmr) > 
-              (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEPINTVL_DEFAULT) 
-              / TCP_SLOW_INTERVAL)
-#endif /* LWIP_TCP_KEEPALIVE */
-      {
-        tcp_keepalive(pcb);
-        pcb->keep_cnt_sent++;
-      }
-    }
-
-    /* If this PCB has queued out of sequence data, but has been
-       inactive for too long, will drop the data (it will eventually
-       be retransmitted). */
-#if TCP_QUEUE_OOSEQ
-    if (pcb->ooseq != NULL &&
-        (uint32_t)tcp_ticks - pcb->tmr >= pcb->rto * TCP_OOSEQ_TIMEOUT) {
-      tcp_segs_free(pcb->ooseq);
-      pcb->ooseq = NULL;
-      LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n"));
-    }
-#endif /* TCP_QUEUE_OOSEQ */
-
-    /* Check if this PCB has stayed too long in SYN-RCVD */
-    if (pcb->state == SYN_RCVD) {
-      if ((uint32_t)(tcp_ticks - pcb->tmr) >
-          TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) {
-        ++pcb_remove;
-        LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n"));
-      }
-    }
-
-    /* Check if this PCB has stayed too long in LAST-ACK */
-    if (pcb->state == LAST_ACK) {
-      if ((uint32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
-        ++pcb_remove;
-        LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n"));
-      }
-    }
-
-    /* If the PCB should be removed, do it. */
-    if (pcb_remove) {
-      struct tcp_pcb *pcb2;
-      tcp_pcb_purge(pcb);
-      /* Remove PCB from tcp_active_pcbs list. */
-      if (prev != NULL) {
-        LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs);
-        prev->next = pcb->next;
-      } else {
-        /* This PCB was the first. */
-        LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb);
-        tcp_active_pcbs = pcb->next;
-      }
-
-      TCP_EVENT_ERR(pcb->errf, pcb->callback_arg, ECONNABORTED);
-      if (pcb_reset) {
-        tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
-          pcb->local_port, pcb->remote_port);
-      }
-
-      pcb2 = pcb;
-      pcb = pcb->next;
-                       kmem_cache_free(tcp_pcb_kcache, (void*)pcb2);
-    } else {
-      /* get the 'next' element now and work with 'prev' below (in case of abort) */
-      prev = pcb;
-      pcb = pcb->next;
-
-      /* We check if we should poll the connection. */
-      ++prev->polltmr;
-      if (prev->polltmr >= prev->pollinterval) {
-        prev->polltmr = 0;
-        LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n"));
-        TCP_EVENT_POLL(prev, err);
-        /* if err == ECONNABORTED, 'prev' is already deallocated */
-        if (err == ESUCCESS) {
-          tcp_output(prev);
-        }
-      }
-    }
-  }
-
-  
-  /* Steps through all of the TIME-WAIT PCBs. */
-  prev = NULL;
-  pcb = tcp_tw_pcbs;
-  while (pcb != NULL) {
-    LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
-    pcb_remove = 0;
-
-    /* Check if this PCB has stayed long enough in TIME-WAIT */
-    if ((uint32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) {
-      ++pcb_remove;
-    }
-    
-
-
-    /* If the PCB should be removed, do it. */
-    if (pcb_remove) {
-      struct tcp_pcb *pcb2;
-      tcp_pcb_purge(pcb);
-      /* Remove PCB from tcp_tw_pcbs list. */
-      if (prev != NULL) {
-        LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs);
-        prev->next = pcb->next;
-      } else {
-        /* This PCB was the first. */
-        LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb);
-        tcp_tw_pcbs = pcb->next;
-      }
-      pcb2 = pcb;
-      pcb = pcb->next;
-                       kmem_cache_free(tcp_pcb_kcache, (void*)pcb2);
-    } else {
-      prev = pcb;
-      pcb = pcb->next;
-    }
-  }
-}
-
-
-/**
- * Deallocates a list of TCP segments (tcp_seg structures).
- *
- * @param seg tcp_seg list of TCP segments to free
+/*
+ *  called with s qlocked
  */
-void
-tcp_segs_free(struct tcp_seg *seg)
+static void tcpstart(struct conv *s, int mode)
+{
+       Tcpctl *tcb;
+       struct tcppriv *tpriv;
+       char *kpname;
+
+       tpriv = s->p->priv;
+
+       if (tpriv->ackprocstarted == 0) {
+               qlock(&tpriv->apl);
+               if (tpriv->ackprocstarted == 0) {
+                       /* tcpackproc needs to free this if it ever exits */
+                       kpname = kmalloc(KNAMELEN, MEM_WAIT);
+                       snprintf(kpname, KNAMELEN, "#I%dtcpack", s->p->f->dev);
+                       ktask(kpname, tcpackproc, s->p);
+                       tpriv->ackprocstarted = 1;
+               }
+               qunlock(&tpriv->apl);
+       }
+
+       tcb = (Tcpctl *) s->ptcl;
+
+       inittcpctl(s, mode);
+
+       iphtadd(&tpriv->ht, s);
+       switch (mode) {
+               case TCP_LISTEN:
+                       tpriv->stats[PassiveOpens]++;
+                       tcb->flags |= CLONE;
+                       tcpsetstate(s, Listen);
+                       break;
+
+               case TCP_CONNECT:
+                       tpriv->stats[ActiveOpens]++;
+                       tcb->flags |= ACTIVE;
+                       tcpsndsyn(s, tcb);
+                       tcpsetstate(s, Syn_sent);
+                       tcpoutput(s);
+                       break;
+       }
+}
+
+static char *tcpflag(uint16_t flag)
 {
-  while (seg != NULL) {
-    struct tcp_seg *next = seg->next;
-    tcp_seg_free(seg);
-    seg = next;
-  }
+       static char buf[128];
+
+       snprintf(buf, sizeof(buf), "%d", flag >> 10);   /* Head len */
+       if (flag & URG)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " URG");
+       if (flag & ACK)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " ACK");
+       if (flag & PSH)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " PSH");
+       if (flag & RST)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " RST");
+       if (flag & SYN)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " SYN");
+       if (flag & FIN)
+               snprintf(buf, sizeof(buf), "%s%s", buf, " FIN");
+
+       return buf;
 }
 
-/**
- * Frees a TCP segment (tcp_seg structure).
- *
- * @param seg single tcp_seg to free
- */
-void
-tcp_seg_free(struct tcp_seg *seg)
-{
-  if (seg != NULL) {
-    if (seg->p != NULL) {
-      pbuf_free(seg->p);
-#if TCP_DEBUG
-      seg->p = NULL;
-#endif /* TCP_DEBUG */
-    }
-               kmem_cache_free(tcp_segment_kcache, seg);
-  }
-}
-
-/**
- * Sets the priority of a connection.
- *
- * @param pcb the tcp_pcb to manipulate
- * @param prio new priority
- */
-void
-tcp_setprio(struct tcp_pcb *pcb, uint8_t prio)
+/* Helper, determine if we should send a TCP timestamp.  ts_val was the
+ * timestamp from our distant end.  We'll also send a TS on SYN (no ACK). */
+static bool tcp_seg_has_ts(Tcp *tcph)
 {
-  pcb->prio = prio;
+       return tcph->ts_val || ((tcph->flags & SYN) && !(tcph->flags & ACK));
 }
 
-#if TCP_QUEUE_OOSEQ
-/**
- * Returns a copy of the given TCP segment.
- * The pbuf and data are not copied, only the pointers
- *
- * @param seg the old tcp_seg
- * @return a copy of seg
- */ 
-struct tcp_seg *
-tcp_seg_copy(struct tcp_seg *seg)
+/* Given a TCP header/segment and default header size (e.g. TCP4_HDRSIZE),
+ * return the actual hdr_len and opt_pad */
+static void compute_hdrlen_optpad(Tcp *tcph, uint16_t default_hdrlen,
+                                  uint16_t *ret_hdrlen, uint16_t *ret_optpad,
+                                  Tcpctl *tcb)
+{
+       uint16_t hdrlen = default_hdrlen;
+       uint16_t optpad = 0;
+
+       if (tcph->flags & SYN) {
+               if (tcph->mss)
+                       hdrlen += MSS_LENGTH;
+               if (tcph->ws)
+                       hdrlen += WS_LENGTH;
+               if (tcph->sack_ok)
+                       hdrlen += SACK_OK_LENGTH;
+       }
+       if (tcp_seg_has_ts(tcph)) {
+               hdrlen += TS_LENGTH;
+               /* SYNs have other opts, don't do the PREPAD NOOP optimization. */
+               if (!(tcph->flags & SYN))
+                       hdrlen += TS_SEND_PREPAD;
+       }
+       if (tcb && tcb->rcv.nr_sacks)
+               hdrlen += 2 + tcb->rcv.nr_sacks * 8;
+       optpad = hdrlen & 3;
+       if (optpad)
+               optpad = 4 - optpad;
+       hdrlen += optpad;
+       *ret_hdrlen = hdrlen;
+       *ret_optpad = optpad;
+}
+
+/* Writes the TCP options for tcph to opt. */
+static void write_opts(Tcp *tcph, uint8_t *opt, uint16_t optpad, Tcpctl *tcb)
 {
-  struct tcp_seg *cseg;
+       if (tcph->flags & SYN) {
+               if (tcph->mss != 0) {
+                       *opt++ = MSSOPT;
+                       *opt++ = MSS_LENGTH;
+                       hnputs(opt, tcph->mss);
+                       opt += 2;
+               }
+               if (tcph->ws != 0) {
+                       *opt++ = WSOPT;
+                       *opt++ = WS_LENGTH;
+                       *opt++ = tcph->ws;
+               }
+               if (tcph->sack_ok) {
+                       *opt++ = SACK_OK_OPT;
+                       *opt++ = SACK_OK_LENGTH;
+               }
+       }
+       if (tcp_seg_has_ts(tcph)) {
+               if (!(tcph->flags & SYN)) {
+                       *opt++ = NOOPOPT;
+                       *opt++ = NOOPOPT;
+               }
+               *opt++ = TS_OPT;
+               *opt++ = TS_LENGTH;
+               /* Setting TSval, our time */
+               hnputl(opt, milliseconds());
+               opt += 4;
+               /* Setting TSecr, the time we last saw from them, stored in ts_val */
+               hnputl(opt, tcph->ts_val);
+               opt += 4;
+       }
+       if (tcb && tcb->rcv.nr_sacks) {
+               *opt++ = SACK_OPT;
+               *opt++ = 2 + tcb->rcv.nr_sacks * 8;
+               for (int i = 0; i < tcb->rcv.nr_sacks; i++) {
+                       hnputl(opt, tcb->rcv.sacks[i].left);
+                       opt += 4;
+                       hnputl(opt, tcb->rcv.sacks[i].right);
+                       opt += 4;
+               }
+       }
+       while (optpad-- > 0)
+               *opt++ = NOOPOPT;
+}
 
-  cseg = (struct tcp_seg *)kmem_cache_alloc(tcp_segment_kcache, 0);
-  if (cseg == NULL) {
-    return NULL;
-  }
-  memcpy((uint8_t *)cseg, (const uint8_t *)seg, sizeof(struct tcp_seg)); 
-  pbuf_ref(cseg->p);
-  return cseg;
+/* Given a data block (or NULL) returns a block with enough header room that we
+ * can send out.  block->wp is set to the beginning of the payload.  Returns
+ * NULL on some sort of error. */
+static struct block *alloc_or_pad_block(struct block *data,
+                                        uint16_t total_hdr_size)
+{
+       if (data) {
+               data = padblock(data, total_hdr_size);
+               if (data == NULL)
+                       return NULL;
+       } else {
+               /* the 64 pad is to meet mintu's */
+               data = block_alloc(total_hdr_size + 64, MEM_WAIT);
+               if (data == NULL)
+                       return NULL;
+               data->wp += total_hdr_size;
+       }
+       return data;
 }
-#endif /* TCP_QUEUE_OOSEQ */
 
+static struct block *htontcp6(Tcp *tcph, struct block *data, Tcp6hdr *ph,
+                              Tcpctl *tcb)
+{
+       int dlen = blocklen(data);
+       Tcp6hdr *h;
+       uint16_t csum;
+       uint16_t hdrlen, optpad;
+
+       compute_hdrlen_optpad(tcph, TCP6_HDRSIZE, &hdrlen, &optpad, tcb);
+
+       data = alloc_or_pad_block(data, hdrlen + TCP6_PKT);
+       if (data == NULL)
+               return NULL;
+       /* relative to the block start (bp->rp).  Note TCP structs include IP. */
+       data->network_offset = 0;
+       data->transport_offset = offsetof(Tcp6hdr, tcpsport);
+
+       /* copy in pseudo ip header plus port numbers */
+       h = (Tcp6hdr *) (data->rp);
+       memmove(h, ph, TCP6_TCBPHDRSZ);
+
+       /* compose pseudo tcp header, do cksum calculation */
+       hnputl(h->vcf, hdrlen + dlen);
+       h->ploadlen[0] = h->ploadlen[1] = h->proto = 0;
+       h->ttl = ph->proto;
+
+       /* copy in variable bits */
+       hnputl(h->tcpseq, tcph->seq);
+       hnputl(h->tcpack, tcph->ack);
+       hnputs(h->tcpflag, (hdrlen << 10) | tcph->flags);
+       hnputs(h->tcpwin, tcph->wnd >> (tcb != NULL ? tcb->snd.scale : 0));
+       hnputs(h->tcpurg, tcph->urg);
+
+       write_opts(tcph, h->tcpopt, optpad, tcb);
+
+       if (tcb != NULL && tcb->nochecksum) {
+               h->tcpcksum[0] = h->tcpcksum[1] = 0;
+       } else {
+               csum = ptclcsum(data, TCP6_IPLEN, hdrlen + dlen + TCP6_PHDRSIZE);
+               hnputs(h->tcpcksum, csum);
+       }
+
+       /* move from pseudo header back to normal ip header */
+       memset(h->vcf, 0, 4);
+       h->vcf[0] = IP_VER6;
+       hnputs(h->ploadlen, hdrlen + dlen);
+       h->proto = ph->proto;
 
+       return data;
+}
 
-/**
- * Used to specify the argument that should be passed callback
- * functions.
- *
- * @param pcb tcp_pcb to set the callback argument
- * @param arg void pointer argument to pass to callback functions
- */ 
-void
-tcp_arg(struct tcp_pcb *pcb, void *arg)
-{  
-  pcb->callback_arg = arg;
-}
-#if LWIP_CALLBACK_API
-
-/**
- * Used to specify the function that should be called when a TCP
- * connection receives data.
- *
- * @param pcb tcp_pcb to set the recv callback
- * @param recv callback function to call for this pcb when data is received
- */ 
-void
-tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv)
+static struct block *htontcp4(Tcp *tcph, struct block *data, Tcp4hdr *ph,
+                              Tcpctl *tcb)
 {
-  pcb->recv = recv;
+       int dlen = blocklen(data);
+       Tcp4hdr *h;
+       uint16_t csum;
+       uint16_t hdrlen, optpad;
+
+       compute_hdrlen_optpad(tcph, TCP4_HDRSIZE, &hdrlen, &optpad, tcb);
+
+       data = alloc_or_pad_block(data, hdrlen + TCP4_PKT);
+       if (data == NULL)
+               return NULL;
+       /* relative to the block start (bp->rp).  Note TCP structs include IP. */
+       data->network_offset = 0;
+       data->transport_offset = offsetof(Tcp4hdr, tcpsport);
+
+       /* copy in pseudo ip header plus port numbers */
+       h = (Tcp4hdr *) (data->rp);
+       memmove(h, ph, TCP4_TCBPHDRSZ);
+
+       /* copy in variable bits */
+       hnputs(h->tcplen, hdrlen + dlen);
+       hnputl(h->tcpseq, tcph->seq);
+       hnputl(h->tcpack, tcph->ack);
+       hnputs(h->tcpflag, (hdrlen << 10) | tcph->flags);
+       hnputs(h->tcpwin, tcph->wnd >> (tcb != NULL ? tcb->snd.scale : 0));
+       hnputs(h->tcpurg, tcph->urg);
+
+       write_opts(tcph, h->tcpopt, optpad, tcb);
+
+       if (tcb != NULL && tcb->nochecksum) {
+               h->tcpcksum[0] = h->tcpcksum[1] = 0;
+       } else {
+               assert(data->transport_offset == TCP4_IPLEN + TCP4_PHDRSIZE);
+               csum = ~ptclcsum(data, TCP4_IPLEN, TCP4_PHDRSIZE);
+               hnputs(h->tcpcksum, csum);
+               data->tx_csum_offset = ph->tcpcksum - ph->tcpsport;
+               data->flag |= Btcpck;
+       }
+
+       return data;
 }
 
-/**
- * Used to specify the function that should be called when TCP data
- * has been successfully delivered to the remote host.
- *
- * @param pcb tcp_pcb to set the sent callback
- * @param sent callback function to call for this pcb when data is successfully sent
- */ 
-void
-tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent)
+static void parse_inbound_sacks(Tcp *tcph, uint8_t *opt, uint16_t optlen)
 {
-  pcb->sent = sent;
+       uint8_t nr_sacks;
+       uint32_t left, right;
+
+       nr_sacks = (optlen - 2) / 8;
+       if (nr_sacks > MAX_NR_SACKS_PER_PACKET)
+               return;
+       opt += 2;
+       for (int i = 0; i < nr_sacks; i++, opt += 8) {
+               left = nhgetl(opt);
+               right = nhgetl(opt + 4);
+               if (seq_ge(left, right)) {
+                       /* bad / malicious SACK.  Skip it, and adjust. */
+                       nr_sacks--;
+                       i--;    /* stay on this array element next loop */
+                       continue;
+               }
+               tcph->sacks[i].left = left;
+               tcph->sacks[i].right = right;
+       }
+       tcph->nr_sacks = nr_sacks;
 }
 
-/**
- * Used to specify the function that should be called when a fatal error
- * has occured on the connection.
- *
- * @param pcb tcp_pcb to set the err callback
- * @param err callback function to call for this pcb when a fatal error
- *        has occured on the connection
- */ 
-void
-tcp_err(struct tcp_pcb *pcb, tcp_err_fn err)
+static void parse_inbound_opts(Tcp *tcph, uint8_t *opt, uint16_t optsize)
 {
-  pcb->errf = err;
+       uint16_t optlen;
+
+       while (optsize > 0 && *opt != EOLOPT) {
+               if (*opt == NOOPOPT) {
+                       optsize--;
+                       opt++;
+                       continue;
+               }
+               optlen = opt[1];
+               if (optlen < 2 || optlen > optsize)
+                       break;
+               switch (*opt) {
+                       case MSSOPT:
+                               if (optlen == MSS_LENGTH)
+                                       tcph->mss = nhgets(opt + 2);
+                               break;
+                       case WSOPT:
+                               if (optlen == WS_LENGTH && *(opt + 2) <= MAX_WS_VALUE)
+                                       tcph->ws = HaveWS | *(opt + 2);
+                               break;
+                       case SACK_OK_OPT:
+                               if (optlen == SACK_OK_LENGTH)
+                                       tcph->sack_ok = TRUE;
+                               break;
+                       case SACK_OPT:
+                               parse_inbound_sacks(tcph, opt, optlen);
+                               break;
+                       case TS_OPT:
+                               if (optlen == TS_LENGTH) {
+                                       tcph->ts_val = nhgetl(opt + 2);
+                                       tcph->ts_ecr = nhgetl(opt + 6);
+                               }
+                               break;
+               }
+               optsize -= optlen;
+               opt += optlen;
+       }
 }
 
-/**
- * Used for specifying the function that should be called when a
- * LISTENing connection has been connected to another host.
- *
- * @param pcb tcp_pcb to set the accept callback
- * @param accept callback function to call for this pcb when LISTENing
- *        connection has been connected to another host
- */ 
-void
-tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept)
+/* Helper, clears the opts.  We'll later set them with e.g. parse_inbound_opts,
+ * set them manually, or something else. */
+static void clear_tcph_opts(Tcp *tcph)
 {
-  pcb->accept = accept;
+       tcph->mss = 0;
+       tcph->ws = 0;
+       tcph->sack_ok = FALSE;
+       tcph->nr_sacks = 0;
+       tcph->ts_val = 0;
+       tcph->ts_ecr = 0;
 }
-#endif /* LWIP_CALLBACK_API */
 
+static int ntohtcp6(Tcp *tcph, struct block **bpp)
+{
+       Tcp6hdr *h;
+       uint16_t hdrlen;
+
+       *bpp = pullupblock(*bpp, TCP6_PKT + TCP6_HDRSIZE);
+       if (*bpp == NULL)
+               return -1;
 
-/**
- * Used to specify the function that should be called periodically
- * from TCP. The interval is specified in terms of the TCP coarse
- * timer interval, which is called twice a second.
- *
- */ 
-void
-tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, uint8_t interval)
+       h = (Tcp6hdr *) ((*bpp)->rp);
+       tcph->source = nhgets(h->tcpsport);
+       tcph->dest = nhgets(h->tcpdport);
+       tcph->seq = nhgetl(h->tcpseq);
+       tcph->ack = nhgetl(h->tcpack);
+       hdrlen = (h->tcpflag[0] >> 2) & ~3;
+       if (hdrlen < TCP6_HDRSIZE) {
+               freeblist(*bpp);
+               return -1;
+       }
+
+       tcph->flags = h->tcpflag[1];
+       tcph->wnd = nhgets(h->tcpwin);
+       tcph->urg = nhgets(h->tcpurg);
+       clear_tcph_opts(tcph);
+       tcph->len = nhgets(h->ploadlen) - hdrlen;
+
+       *bpp = pullupblock(*bpp, hdrlen + TCP6_PKT);
+       if (*bpp == NULL)
+               return -1;
+       parse_inbound_opts(tcph, h->tcpopt, hdrlen - TCP6_HDRSIZE);
+       return hdrlen;
+}
+
+static int ntohtcp4(Tcp *tcph, struct block **bpp)
 {
-#if LWIP_CALLBACK_API
-  pcb->poll = poll;
-#else /* LWIP_CALLBACK_API */  
-  LWIP_UNUSED_ARG(poll);
-#endif /* LWIP_CALLBACK_API */  
-  pcb->pollinterval = interval;
+       Tcp4hdr *h;
+       uint16_t hdrlen;
+
+       *bpp = pullupblock(*bpp, TCP4_PKT + TCP4_HDRSIZE);
+       if (*bpp == NULL)
+               return -1;
+
+       h = (Tcp4hdr *) ((*bpp)->rp);
+       tcph->source = nhgets(h->tcpsport);
+       tcph->dest = nhgets(h->tcpdport);
+       tcph->seq = nhgetl(h->tcpseq);
+       tcph->ack = nhgetl(h->tcpack);
+
+       hdrlen = (h->tcpflag[0] >> 2) & ~3;
+       if (hdrlen < TCP4_HDRSIZE) {
+               freeblist(*bpp);
+               return -1;
+       }
+
+       tcph->flags = h->tcpflag[1];
+       tcph->wnd = nhgets(h->tcpwin);
+       tcph->urg = nhgets(h->tcpurg);
+       clear_tcph_opts(tcph);
+       tcph->len = nhgets(h->length) - (hdrlen + TCP4_PKT);
+
+       *bpp = pullupblock(*bpp, hdrlen + TCP4_PKT);
+       if (*bpp == NULL)
+               return -1;
+       parse_inbound_opts(tcph, h->tcpopt, hdrlen - TCP4_HDRSIZE);
+       return hdrlen;
 }
 
-/**
- * Purges a TCP PCB. Removes any buffered data and frees the buffer memory
- * (pcb->ooseq, pcb->unsent and pcb->unacked are freed).
- *
- * @param pcb tcp_pcb to purge. The pcb itself is not deallocated!
- */
-void
-tcp_pcb_purge(struct tcp_pcb *pcb)
-{
-  if (pcb->state != CLOSED &&
-     pcb->state != TIME_WAIT &&
-     pcb->state != LISTEN) {
-
-    printd("tcp_pcb_purge\n");
-
-#if TCP_LISTEN_BACKLOG
-    if (pcb->state == SYN_RCVD) {
-      /* Need to find the corresponding listen_pcb and decrease its accepts_pending */
-      struct tcp_pcb_listen *lpcb;
-      LWIP_ASSERT("tcp_pcb_purge: pcb->state == SYN_RCVD but tcp_listen_pcbs is NULL",
-        tcp_listen_pcbs.listen_pcbs != NULL);
-      for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) {
-        if ((lpcb->local_port == pcb->local_port) &&
-            (ip_addr_isany(&lpcb->local_ip) ||
-             ip_addr_cmp(&pcb->local_ip, &lpcb->local_ip))) {
-            /* port and address of the listen pcb match the timed-out pcb */
-            LWIP_ASSERT("tcp_pcb_purge: listen pcb does not have accepts pending",
-              lpcb->accepts_pending > 0);
-            lpcb->accepts_pending--;
-            break;
-          }
-      }
-    }
-#endif /* TCP_LISTEN_BACKLOG */
-
-
-    if (pcb->refused_data != NULL) {
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n"));
-      pbuf_free(pcb->refused_data);
-      pcb->refused_data = NULL;
-    }
-    if (pcb->unsent != NULL) {
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n"));
-    }
-    if (pcb->unacked != NULL) {
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n"));
-    }
-#if TCP_QUEUE_OOSEQ
-    if (pcb->ooseq != NULL) {
-      LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n"));
-    }
-    tcp_segs_free(pcb->ooseq);
-    pcb->ooseq = NULL;
-#endif /* TCP_QUEUE_OOSEQ */
-
-    /* Stop the retransmission timer as it will expect data on unacked
-       queue if it fires */
-    pcb->rtime = -1;
-
-    tcp_segs_free(pcb->unsent);
-    tcp_segs_free(pcb->unacked);
-    pcb->unacked = pcb->unsent = NULL;
-#if TCP_OVERSIZE
-    pcb->unsent_oversize = 0;
-#endif /* TCP_OVERSIZE */
-  }
-}
-
-/**
- * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first.
- *
- * @param pcblist PCB list to purge.
- * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated!
- */
-void
-tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb)
-{
-  TCP_RMV(pcblist, pcb);
-
-  tcp_pcb_purge(pcb);
-  
-  /* if there is an outstanding delayed ACKs, send it */
-  if (pcb->state != TIME_WAIT &&
-     pcb->state != LISTEN &&
-     pcb->flags & TF_ACK_DELAY) {
-    pcb->flags |= TF_ACK_NOW;
-    tcp_output(pcb);
-  }
-
-  if (pcb->state != LISTEN) {
-    LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL);
-    LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL);
-#if TCP_QUEUE_OOSEQ
-    LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL);
-#endif /* TCP_QUEUE_OOSEQ */
-  }
-
-  pcb->state = CLOSED;
-
-  LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane());
-}
-
-#if TCP_CALCULATE_EFF_SEND_MSS
-/**
- * Calcluates the effective send mss that can be used for a specific IP address
- * by using ip_route to determin the netif used to send to the address and
- * calculating the minimum of TCP_MSS and that netif's mtu (if set).
+/*
+ *  For outgoing calls, generate an initial sequence
+ *  number and put a SYN on the send queue
  */
-uint16_t
-tcp_eff_send_mss(uint16_t sendmss, ip_addr_t *addr)
+static void tcpsndsyn(struct conv *s, Tcpctl *tcb)
 {
-  uint16_t mss_s;
-  struct netif *outif;
+       urandom_read(&tcb->iss, sizeof(tcb->iss));
+       tcb->rttseq = tcb->iss;
+       tcb->snd.wl2 = tcb->iss;
+       tcb->snd.una = tcb->iss;
+       tcb->snd.rtx = tcb->rttseq;
+       tcb->snd.nxt = tcb->rttseq;
+       tcb->flgcnt++;
+       tcb->flags |= FORCE;
+       tcb->sndsyntime = NOW;
+
+       /* set desired mss and scale */
+       tcb->mss = tcpmtu(tcb->ifc, s->ipversion, &tcb->scale);
+}
+
+static void sndrst(struct Proto *tcp, uint8_t *source, uint8_t *dest,
+                   uint16_t length, Tcp *seg, uint8_t version, char *reason)
+{
+       struct block *hbp;
+       uint8_t rflags;
+       struct tcppriv *tpriv;
+       Tcp4hdr ph4;
+       Tcp6hdr ph6;
+
+       netlog(tcp->f, Logtcpreset, "sndrst: %s\n", reason);
+
+       tpriv = tcp->priv;
+
+       if (seg->flags & RST)
+               return;
+
+       /* make pseudo header */
+       switch (version) {
+               case V4:
+                       memset(&ph4, 0, sizeof(ph4));
+                       ph4.vihl = IP_VER4;
+                       v6tov4(ph4.tcpsrc, dest);
+                       v6tov4(ph4.tcpdst, source);
+                       ph4.proto = IP_TCPPROTO;
+                       hnputs(ph4.tcplen, TCP4_HDRSIZE);
+                       hnputs(ph4.tcpsport, seg->dest);
+                       hnputs(ph4.tcpdport, seg->source);
+                       break;
+               case V6:
+                       memset(&ph6, 0, sizeof(ph6));
+                       ph6.vcf[0] = IP_VER6;
+                       ipmove(ph6.tcpsrc, dest);
+                       ipmove(ph6.tcpdst, source);
+                       ph6.proto = IP_TCPPROTO;
+                       hnputs(ph6.ploadlen, TCP6_HDRSIZE);
+                       hnputs(ph6.tcpsport, seg->dest);
+                       hnputs(ph6.tcpdport, seg->source);
+                       break;
+               default:
+                       panic("sndrst: version %d", version);
+       }
 
-  //outif = ip_route(addr);
-    mss_s = DEFAULT_MTU - IP_HDR_SZ - TCP_HLEN;
-    /* RFC 1122, chap 4.2.2.6:
-     * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize
-     * We correct for TCP options in tcp_write(), and don't support IP options.
-     */
-    sendmss = MIN(sendmss, mss_s);
-  return sendmss;
+       tpriv->stats[OutRsts]++;
+       rflags = RST;
+
+       /* convince the other end that this reset is in band */
+       if (seg->flags & ACK) {
+               seg->seq = seg->ack;
+               seg->ack = 0;
+       } else {
+               rflags |= ACK;
+               seg->ack = seg->seq;
+               seg->seq = 0;
+               if (seg->flags & SYN)
+                       seg->ack++;
+               seg->ack += length;
+               if (seg->flags & FIN)
+                       seg->ack++;
+       }
+       seg->flags = rflags;
+       seg->wnd = 0;
+       seg->urg = 0;
+       seg->mss = 0;
+       seg->ws = 0;
+       seg->sack_ok = FALSE;
+       seg->nr_sacks = 0;
+       /* seg->ts_val is already set with their timestamp */
+       switch (version) {
+               case V4:
+                       hbp = htontcp4(seg, NULL, &ph4, NULL);
+                       if (hbp == NULL)
+                               return;
+                       ipoput4(tcp->f, hbp, 0, MAXTTL, DFLTTOS, NULL);
+                       break;
+               case V6:
+                       hbp = htontcp6(seg, NULL, &ph6, NULL);
+                       if (hbp == NULL)
+                               return;
+                       ipoput6(tcp->f, hbp, 0, MAXTTL, DFLTTOS, NULL);
+                       break;
+               default:
+                       panic("sndrst2: version %d", version);
+       }
+}
+
+/*
+ *  send a reset to the remote side and close the conversation
+ *  called with s qlocked
+ */
+static void tcphangup(struct conv *s)
+{
+       ERRSTACK(1);
+       Tcp seg;
+       Tcpctl *tcb;
+       struct block *hbp;
+
+       tcb = (Tcpctl *) s->ptcl;
+       if (ipcmp(s->raddr, IPnoaddr)) {
+               /* discard error style, poperror regardless */
+               if (!waserror()) {
+                       seg.flags = RST | ACK;
+                       seg.ack = tcb->rcv.nxt;
+                       tcb->last_ack_sent = seg.ack;
+                       tcb->rcv.una = 0;
+                       seg.seq = tcb->snd.nxt;
+                       seg.wnd = 0;
+                       seg.urg = 0;
+                       seg.mss = 0;
+                       seg.ws = 0;
+                       seg.sack_ok = FALSE;
+                       seg.nr_sacks = 0;
+                       seg.ts_val = tcb->ts_recent;
+                       switch (s->ipversion) {
+                               case V4:
+                                       tcb->protohdr.tcp4hdr.vihl = IP_VER4;
+                                       hbp = htontcp4(&seg, NULL, &tcb->protohdr.tcp4hdr, tcb);
+                                       ipoput4(s->p->f, hbp, 0, s->ttl, s->tos, s);
+                                       break;
+                               case V6:
+                                       tcb->protohdr.tcp6hdr.vcf[0] = IP_VER6;
+                                       hbp = htontcp6(&seg, NULL, &tcb->protohdr.tcp6hdr, tcb);
+                                       ipoput6(s->p->f, hbp, 0, s->ttl, s->tos, s);
+                                       break;
+                               default:
+                                       panic("tcphangup: version %d", s->ipversion);
+                       }
+               }
+               poperror();
+       }
+       localclose(s, NULL);
 }
-#endif /* TCP_CALCULATE_EFF_SEND_MSS */
 
-const char*
-tcp_debug_state_str(enum tcp_state s)
+/*
+ *  (re)send a SYN ACK
+ */
+static int sndsynack(struct Proto *tcp, Limbo *lp)
 {
-  return tcp_state_str[s];
+       struct block *hbp;
+       Tcp4hdr ph4;
+       Tcp6hdr ph6;
+       Tcp seg;
+       int scale;
+       uint8_t flag = 0;
+
+       /* make pseudo header */
+       switch (lp->version) {
+               case V4:
+                       memset(&ph4, 0, sizeof(ph4));
+                       ph4.vihl = IP_VER4;
+                       v6tov4(ph4.tcpsrc, lp->laddr);
+                       v6tov4(ph4.tcpdst, lp->raddr);
+                       ph4.proto = IP_TCPPROTO;
+                       hnputs(ph4.tcplen, TCP4_HDRSIZE);
+                       hnputs(ph4.tcpsport, lp->lport);
+                       hnputs(ph4.tcpdport, lp->rport);
+                       break;
+               case V6:
+                       memset(&ph6, 0, sizeof(ph6));
+                       ph6.vcf[0] = IP_VER6;
+                       ipmove(ph6.tcpsrc, lp->laddr);
+                       ipmove(ph6.tcpdst, lp->raddr);
+                       ph6.proto = IP_TCPPROTO;
+                       hnputs(ph6.ploadlen, TCP6_HDRSIZE);
+                       hnputs(ph6.tcpsport, lp->lport);
+                       hnputs(ph6.tcpdport, lp->rport);
+                       break;
+               default:
+                       panic("sndrst: version %d", lp->version);
+       }
+       lp->ifc = findipifc(tcp->f, lp->laddr, 0);
+
+       seg.seq = lp->iss;
+       seg.ack = lp->irs + 1;
+       seg.flags = SYN | ACK;
+       seg.urg = 0;
+       seg.mss = tcpmtu(lp->ifc, lp->version, &scale);
+       seg.wnd = QMAX;
+       seg.ts_val = lp->ts_val;
+       seg.nr_sacks = 0;
+
+       /* if the other side set scale, we should too */
+       if (lp->rcvscale) {
+               seg.ws = scale;
+               lp->sndscale = scale;
+       } else {
+               seg.ws = 0;
+               lp->sndscale = 0;
+       }
+       if (SACK_SUPPORTED)
+               seg.sack_ok = lp->sack_ok;
+       else
+               seg.sack_ok = FALSE;
+
+       switch (lp->version) {
+               case V4:
+                       hbp = htontcp4(&seg, NULL, &ph4, NULL);
+                       if (hbp == NULL)
+                               return -1;
+                       ipoput4(tcp->f, hbp, 0, MAXTTL, DFLTTOS, NULL);
+                       break;
+               case V6:
+                       hbp = htontcp6(&seg, NULL, &ph6, NULL);
+                       if (hbp == NULL)
+                               return -1;
+                       ipoput6(tcp->f, hbp, 0, MAXTTL, DFLTTOS, NULL);
+                       break;
+               default:
+                       panic("sndsnack: version %d", lp->version);
+       }
+       lp->lastsend = NOW;
+       return 0;
 }
 
-#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG
-/**
- * Print a tcp header for debugging purposes.
+#define hashipa(a, p) ( ( (a)[IPaddrlen-2] + (a)[IPaddrlen-1] + p )&LHTMASK )
+
+/*
+ *  put a call into limbo and respond with a SYN ACK
  *
- * @param tcphdr pointer to a struct tcp_hdr
+ *  called with proto locked
+ */
+static void limbo(struct conv *s, uint8_t *source, uint8_t *dest, Tcp *seg,
+                  int version)
+{
+       Limbo *lp, **l;
+       struct tcppriv *tpriv;
+       int h;
+
+       tpriv = s->p->priv;
+       h = hashipa(source, seg->source);
+
+       for (l = &tpriv->lht[h]; *l != NULL; l = &lp->next) {
+               lp = *l;
+               if (lp->lport != seg->dest || lp->rport != seg->source
+                       || lp->version != version)
+                       continue;
+               if (ipcmp(lp->raddr, source) != 0)
+                       continue;
+               if (ipcmp(lp->laddr, dest) != 0)
+                       continue;
+
+               /* each new SYN restarts the retransmits */
+               lp->irs = seg->seq;
+               break;
+       }
+       lp = *l;
+       if (lp == NULL) {
+               if (tpriv->nlimbo >= Maxlimbo && tpriv->lht[h]) {
+                       lp = tpriv->lht[h];
+                       tpriv->lht[h] = lp->next;
+                       lp->next = NULL;
+               } else {
+                       lp = kzmalloc(sizeof(*lp), 0);
+                       if (lp == NULL)
+                               return;
+                       tpriv->nlimbo++;
+               }
+               *l = lp;
+               lp->version = version;
+               ipmove(lp->laddr, dest);
+               ipmove(lp->raddr, source);
+               lp->lport = seg->dest;
+               lp->rport = seg->source;
+               lp->mss = seg->mss;
+               lp->rcvscale = seg->ws;
+               lp->sack_ok = seg->sack_ok;
+               lp->irs = seg->seq;
+               lp->ts_val = seg->ts_val;
+               urandom_read(&lp->iss, sizeof(lp->iss));
+       }
+
+       if (sndsynack(s->p, lp) < 0) {
+               *l = lp->next;
+               tpriv->nlimbo--;
+               kfree(lp);
+       }
+}
+
+/*
+ *  resend SYN ACK's once every SYNACK_RXTIMER ms.
  */
-void
-tcp_debug_print(struct tcp_hdr *tcphdr)
-{
-  LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("|    %5"U16_F"      |    %5"U16_F"      | (src port, dest port)\n",
-         ntohs(tcphdr->src), ntohs(tcphdr->dest)));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("|           %010"U32_F"          | (seq no)\n",
-          ntohl(tcphdr->seqno)));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("|           %010"U32_F"          | (ack no)\n",
-         ntohl(tcphdr->ackno)));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" |   |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"|     %5"U16_F"     | (hdrlen, flags (",
-       TCPH_HDRLEN(tcphdr),
-         TCPH_FLAGS(tcphdr) >> 5 & 1,
-         TCPH_FLAGS(tcphdr) >> 4 & 1,
-         TCPH_FLAGS(tcphdr) >> 3 & 1,
-         TCPH_FLAGS(tcphdr) >> 2 & 1,
-         TCPH_FLAGS(tcphdr) >> 1 & 1,
-         TCPH_FLAGS(tcphdr) & 1,
-         ntohs(tcphdr->wnd)));
-  tcp_debug_print_flags(TCPH_FLAGS(tcphdr));
-  LWIP_DEBUGF(TCP_DEBUG, ("), win)\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-  LWIP_DEBUGF(TCP_DEBUG, ("|    0x%04"X16_F"     |     %5"U16_F"     | (chksum, urgp)\n",
-         ntohs(tcphdr->chksum), ntohs(tcphdr->urgp)));
-  LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n"));
-}
-
-/**
- * Print a tcp state for debugging purposes.
+static void limborexmit(struct Proto *tcp)
+{
+       struct tcppriv *tpriv;
+       Limbo **l, *lp;
+       int h;
+       int seen;
+       uint64_t now;
+
+       tpriv = tcp->priv;
+
+       if (!canqlock(&tcp->qlock))
+               return;
+       seen = 0;
+       now = NOW;
+       for (h = 0; h < NLHT && seen < tpriv->nlimbo; h++) {
+               for (l = &tpriv->lht[h]; *l != NULL && seen < tpriv->nlimbo;) {
+                       lp = *l;
+                       seen++;
+                       if (now - lp->lastsend < (lp->rexmits + 1) * SYNACK_RXTIMER)
+                               continue;
+
+                       /* time it out after 1 second */
+                       if (++(lp->rexmits) > 5) {
+                               tpriv->nlimbo--;
+                               *l = lp->next;
+                               kfree(lp);
+                               continue;
+                       }
+
+                       /* if we're being attacked, don't bother resending SYN ACK's */
+                       if (tpriv->nlimbo > 100)
+                               continue;
+
+                       if (sndsynack(tcp, lp) < 0) {
+                               tpriv->nlimbo--;
+                               *l = lp->next;
+                               kfree(lp);
+                               continue;
+                       }
+
+                       l = &lp->next;
+               }
+       }
+       qunlock(&tcp->qlock);
+}
+
+/*
+ *  lookup call in limbo.  if found, throw it out.
  *
- * @param s enum tcp_state to print
+ *  called with proto locked
  */
-void
-tcp_debug_print_state(enum tcp_state s)
+static void limborst(struct conv *s, Tcp *segp, uint8_t *src, uint8_t *dst,
+                     uint8_t version)
+{
+       Limbo *lp, **l;
+       int h;
+       struct tcppriv *tpriv;
+
+       tpriv = s->p->priv;
+
+       /* find a call in limbo */
+       h = hashipa(src, segp->source);
+       for (l = &tpriv->lht[h]; *l != NULL; l = &lp->next) {
+               lp = *l;
+               if (lp->lport != segp->dest || lp->rport != segp->source
+                       || lp->version != version)
+                       continue;
+               if (ipcmp(lp->laddr, dst) != 0)
+                       continue;
+               if (ipcmp(lp->raddr, src) != 0)
+                       continue;
+
+               /* RST can only follow the SYN */
+               if (segp->seq == lp->irs + 1) {
+                       tpriv->nlimbo--;
+                       *l = lp->next;
+                       kfree(lp);
+               }
+               break;
+       }
+}
+
+/* The advertised MSS (e.g. 1460) includes any per-packet TCP options, such as
+ * TCP timestamps.  A given packet will contain mss bytes, but only typical_mss
+ * bytes of *data*.  If we know we'll use those options, we should adjust our
+ * typical_mss, which will affect the cwnd. */
+static void adjust_typical_mss_for_opts(Tcp *tcph, Tcpctl *tcb)
 {
-  LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s]));
+       uint16_t opt_size = 0;
+
+       if (tcph->ts_val)
+               opt_size += TS_LENGTH + TS_SEND_PREPAD;
+       opt_size = ROUNDUP(opt_size, 4);
+       tcb->typical_mss -= opt_size;
 }
 
-/**
- * Print tcp flags for debugging purposes.
+/*
+ *  come here when we finally get an ACK to our SYN-ACK.
+ *  lookup call in limbo.  if found, create a new conversation
  *
- * @param flags tcp flags, all active flags are printed
+ *  called with proto locked
  */
-void
-tcp_debug_print_flags(uint8_t flags)
-{
-  if (flags & TCP_FIN) {
-    LWIP_DEBUGF(TCP_DEBUG, ("FIN "));
-  }
-  if (flags & TCP_SYN) {
-    LWIP_DEBUGF(TCP_DEBUG, ("SYN "));
-  }
-  if (flags & TCP_RST) {
-    LWIP_DEBUGF(TCP_DEBUG, ("RST "));
-  }
-  if (flags & TCP_PSH) {
-    LWIP_DEBUGF(TCP_DEBUG, ("PSH "));
-  }
-  if (flags & TCP_ACK) {
-    LWIP_DEBUGF(TCP_DEBUG, ("ACK "));
-  }
-  if (flags & TCP_URG) {
-    LWIP_DEBUGF(TCP_DEBUG, ("URG "));
-  }
-  if (flags & TCP_ECE) {
-    LWIP_DEBUGF(TCP_DEBUG, ("ECE "));
-  }
-  if (flags & TCP_CWR) {
-    LWIP_DEBUGF(TCP_DEBUG, ("CWR "));
-  }
-  LWIP_DEBUGF(TCP_DEBUG, ("\n"));
-}
-
-/**
- * Print all tcp_pcbs in every list for debugging purposes.
+static struct conv *tcpincoming(struct conv *s, Tcp *segp, uint8_t *src,
+                                                               uint8_t *dst, uint8_t version)
+{
+       struct conv *new;
+       Tcpctl *tcb;
+       struct tcppriv *tpriv;
+       Tcp4hdr *h4;
+       Tcp6hdr *h6;
+       Limbo *lp, **l;
+       int h;
+
+       /* unless it's just an ack, it can't be someone coming out of limbo */
+       if ((segp->flags & SYN) || (segp->flags & ACK) == 0)
+               return NULL;
+
+       tpriv = s->p->priv;
+
+       /* find a call in limbo */
+       h = hashipa(src, segp->source);
+       for (l = &tpriv->lht[h]; (lp = *l) != NULL; l = &lp->next) {
+               netlog(s->p->f, Logtcp,
+                          "tcpincoming s %I!%d/%I!%d d %I!%d/%I!%d v %d/%d\n", src,
+                          segp->source, lp->raddr, lp->rport, dst, segp->dest, lp->laddr,
+                          lp->lport, version, lp->version);
+
+               if (lp->lport != segp->dest || lp->rport != segp->source
+                       || lp->version != version)
+                       continue;
+               if (ipcmp(lp->laddr, dst) != 0)
+                       continue;
+               if (ipcmp(lp->raddr, src) != 0)
+                       continue;
+
+               /* we're assuming no data with the initial SYN */
+               if (segp->seq != lp->irs + 1 || segp->ack != lp->iss + 1) {
+                       netlog(s->p->f, Logtcp, "tcpincoming s 0x%lx/0x%lx a 0x%lx 0x%lx\n",
+                                  segp->seq, lp->irs + 1, segp->ack, lp->iss + 1);
+                       lp = NULL;
+               } else {
+                       tpriv->nlimbo--;
+                       *l = lp->next;
+               }
+               break;
+       }
+       if (lp == NULL)
+               return NULL;
+
+       new = Fsnewcall(s, src, segp->source, dst, segp->dest, version);
+       if (new == NULL)
+               return NULL;
+
+       memmove(new->ptcl, s->ptcl, sizeof(Tcpctl));
+       tcb = (Tcpctl *) new->ptcl;
+       tcb->flags &= ~CLONE;
+       tcb->timer.arg = new;
+       tcb->timer.state = TcptimerOFF;
+       tcb->acktimer.arg = new;
+       tcb->acktimer.state = TcptimerOFF;
+       tcb->katimer.arg = new;
+       tcb->katimer.state = TcptimerOFF;
+       tcb->rtt_timer.arg = new;
+       tcb->rtt_timer.state = TcptimerOFF;
+
+       tcb->irs = lp->irs;
+       tcb->rcv.nxt = tcb->irs + 1;
+       tcb->rcv.urg = tcb->rcv.nxt;
+
+       tcb->iss = lp->iss;
+       tcb->rttseq = tcb->iss;
+       tcb->snd.wl2 = tcb->iss;
+       tcb->snd.una = tcb->iss + 1;
+       tcb->snd.rtx = tcb->iss + 1;
+       tcb->snd.nxt = tcb->iss + 1;
+       tcb->flgcnt = 0;
+       tcb->flags |= SYNACK;
+
+       /* our sending max segment size cannot be bigger than what he asked for */
+       if (lp->mss != 0 && lp->mss < tcb->mss) {
+               tcb->mss = lp->mss;
+               tcb->typical_mss = tcb->mss;
+       }
+       adjust_typical_mss_for_opts(segp, tcb);
+
+       /* Here's where we record the previously-decided header options.  They were
+        * actually decided on when we agreed to them in the SYNACK we sent.  We
+        * didn't create an actual TCB until now, so we can copy those decisions out
+        * of the limbo tracker and into the TCB. */
+       tcb->ifc = lp->ifc;
+       tcb->sack_ok = lp->sack_ok;
+       /* window scaling */
+       tcpsetscale(new, tcb, lp->rcvscale, lp->sndscale);
+       tcb_check_tso(tcb);
+
+       tcb->snd.wnd = segp->wnd;
+       tcb->cwind = tcb->typical_mss * CWIND_SCALE;
+
+       /* set initial round trip time */
+       tcb->sndsyntime = lp->lastsend + lp->rexmits * SYNACK_RXTIMER;
+       tcpsynackrtt(new);
+
+       kfree(lp);
+
+       /* set up proto header */
+       switch (version) {
+               case V4:
+                       h4 = &tcb->protohdr.tcp4hdr;
+                       memset(h4, 0, sizeof(*h4));
+                       h4->proto = IP_TCPPROTO;
+                       hnputs(h4->tcpsport, new->lport);
+                       hnputs(h4->tcpdport, new->rport);
+                       v6tov4(h4->tcpsrc, dst);
+                       v6tov4(h4->tcpdst, src);
+                       break;
+               case V6:
+                       h6 = &tcb->protohdr.tcp6hdr;
+                       memset(h6, 0, sizeof(*h6));
+                       h6->proto = IP_TCPPROTO;
+                       hnputs(h6->tcpsport, new->lport);
+                       hnputs(h6->tcpdport, new->rport);
+                       ipmove(h6->tcpsrc, dst);
+                       ipmove(h6->tcpdst, src);
+                       break;
+               default:
+                       panic("tcpincoming: version %d", new->ipversion);
+       }
+
+       tcpsetstate(new, Established);
+
+       iphtadd(&tpriv->ht, new);
+
+       return new;
+}
+
+/*
+ *  use the time between the first SYN and its ACK as the
+ *  initial round trip time
  */
-void
-tcp_debug_print_pcbs(void)
-{
-  struct tcp_pcb *pcb;
-  LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n"));
-  for(pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
-    LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
-                       pcb->local_port, pcb->remote_port,
-                       pcb->snd_nxt, pcb->rcv_nxt));
-    tcp_debug_print_state(pcb->state);
-  }    
-  LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n"));
-  for(pcb = (struct tcp_pcb *)tcp_listen_pcbs.pcbs; pcb != NULL; pcb = pcb->next) {
-    LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
-                       pcb->local_port, pcb->remote_port,
-                       pcb->snd_nxt, pcb->rcv_nxt));
-    tcp_debug_print_state(pcb->state);
-  }    
-  LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n"));
-  for(pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
-    LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ",
-                       pcb->local_port, pcb->remote_port,
-                       pcb->snd_nxt, pcb->rcv_nxt));
-    tcp_debug_print_state(pcb->state);
-  }    
-}
-
-/**
- * Check state consistency of the tcp_pcb lists.
+static void tcpsynackrtt(struct conv *s)
+{
+       Tcpctl *tcb;
+       uint64_t delta;
+       struct tcppriv *tpriv;
+
+       tcb = (Tcpctl *) s->ptcl;
+       tpriv = s->p->priv;
+
+       delta = NOW - tcb->sndsyntime;
+       tcb->srtt = delta;
+       tcb->mdev = delta / 2;
+
+       /* halt round trip timer */
+       tcphalt(tpriv, &tcb->rtt_timer);
+}
+
+/* For LFNs (long/fat), our default tx queue doesn't hold enough data, and TCP
+ * blocks on the application - even if the app already has the data ready to go.
+ * We need to hold the sent, unacked data (1x cwnd), plus all the data we might
+ * send next RTT (1x cwnd).  Note this is called after cwnd was expanded. */
+static void adjust_tx_qio_limit(struct conv *s)
+{
+       Tcpctl *tcb = (Tcpctl *) s->ptcl;
+       size_t ideal_limit = tcb->cwind * 2;
+
+       /* This is called for every ACK, and it's not entirely free to update the
+        * limit (locks, CVs, taps).  Updating in chunks of mss seems reasonable.
+        * During SS, we'll update this on most ACKs (given each ACK increased the
+        * cwind by > MSS).
+        *
+        * We also don't want a lot of tiny blocks from the user, but the way qio
+        * works, you can put in as much as you want (Maxatomic) and then get
+        * flow-controlled. */
+       if (qgetlimit(s->wq) + tcb->typical_mss < ideal_limit)
+               qsetlimit(s->wq, ideal_limit);
+       /* TODO: we could shrink the qio limit too, if we had a better idea what the
+        * actual threshold was.  We want the limit to be the 'stable' cwnd * 2. */
+}
+
+/* Attempts to merge later sacks into sack 'into' (index in the array) */
+static void merge_sacks_into(Tcpctl *tcb, int into)
+{
+       struct sack_block *into_sack = &tcb->snd.sacks[into];
+       struct sack_block *tcb_sack;
+       int shift = 0;
+
+       for (int i = into + 1; i < tcb->snd.nr_sacks; i++) {
+               tcb_sack = &tcb->snd.sacks[i];
+               if (seq_lt(into_sack->right, tcb_sack->left))
+                       break;
+               if (seq_gt(tcb_sack->right, into_sack->right))
+                       into_sack->right = tcb_sack->right;
+               shift++;
+       }
+       if (shift) {
+               memmove(tcb->snd.sacks + into + 1,
+                       tcb->snd.sacks + into + 1 + shift,
+                       sizeof(struct sack_block) * (tcb->snd.nr_sacks - into - 1
+                                                            - shift));
+               tcb->snd.nr_sacks -= shift;
+       }
+}
+
+/* If we update a sack, it means they received a packet (possibly out of order),
+ * but they have not received earlier packets.  Otherwise, they would do a full
+ * ACK.
+ *
+ * The trick is in knowing whether the reception growing this sack is due to a
+ * retrans or due to packets from before our last loss event.  The rightmost
+ * sack tends to grow a lot with packets we sent before the loss.  However,
+ * intermediate sacks that grow are signs of a loss, since they only grow as a
+ * result of retrans.
+ *
+ * This is only true for the first time through a retrans.  After we've gone
+ * through a full retrans blast, the sack that hinted at the retrans loss (and
+ * there could be multiple of them!) will continue to grow.  We could come up
+ * with some tracking for this, but instead we'll just do a one-time deal.  You
+ * can recover from one detected sack retrans loss.  After that, you'll have to
+ * use the RTO.
+ *
+ * This won't catch some things, like a sack that grew and merged with the
+ * rightmost sack.  This also won't work if you have a single sack.  We can't
+ * tell where the retrans ends and the sending begins. */
+static bool sack_hints_at_loss(Tcpctl *tcb, struct sack_block *tcb_sack)
+{
+       if (tcb->snd.recovery != SACK_RETRANS_RECOVERY)
+               return FALSE;
+       return &tcb->snd.sacks[tcb->snd.nr_sacks - 1] != tcb_sack;
+}
+
+static bool sack_contains(struct sack_block *tcb_sack, uint32_t seq)
+{
+       return seq_le(tcb_sack->left, seq) && seq_lt(seq, tcb_sack->right);
+}
+
+/* Debugging helper! */
+static void sack_asserter(Tcpctl *tcb, char *str)
+{
+       struct sack_block *tcb_sack;
+
+       for (int i = 0; i < tcb->snd.nr_sacks; i++) {
+               tcb_sack = &tcb->snd.sacks[i];
+               /* Checking invariants: snd.rtx is never inside a sack, sacks are always
+                * mutually exclusive. */
+               if (sack_contains(tcb_sack, tcb->snd.rtx) ||
+                   ((i + 1 < tcb->snd.nr_sacks) && seq_ge(tcb_sack->right,
+                                                              (tcb_sack + 1)->left))) {
+                       printk("SACK ASSERT ERROR at %s\n", str);
+                       printk("rtx %u una %u nxt %u, sack [%u, %u)\n",
+                              tcb->snd.rtx, tcb->snd.una, tcb->snd.nxt, tcb_sack->left,
+                                  tcb_sack->right);
+                       for (int i = 0; i < tcb->snd.nr_sacks; i++)
+                               printk("\t %d: [%u, %u)\n", i, tcb->snd.sacks[i].left,
+                                      tcb->snd.sacks[i].right);
+                       backtrace();
+                       panic("");
+               }
+       }
+}
+
+/* Updates bookkeeping whenever a sack is added or updated */
+static void sack_has_changed(struct conv *s, Tcpctl *tcb,
+                             struct sack_block *tcb_sack)
+{
+       /* Due to the change, snd.rtx might be in the middle of this sack.  Advance
+        * it to the right edge. */
+       if (sack_contains(tcb_sack, tcb->snd.rtx))
+               tcb->snd.rtx = tcb_sack->right;
+
+       /* This is a sack for something we retransed and we think it means there was
+        * another loss.  Instead of waiting for the RTO, we can take action. */
+       if (sack_hints_at_loss(tcb, tcb_sack)) {
+               if (++tcb->snd.sack_loss_hint == TCPREXMTTHRESH) {
+                       netlog(s->p->f, Logtcprxmt,
+                              "%I.%d -> %I.%d: sack rxmit loss: snd.rtx %u, sack [%u,%u), una %u, recovery_pt %u\n",
+                              s->laddr, s->lport, s->raddr, s->rport,
+                              tcb->snd.rtx, tcb_sack->left, tcb_sack->right, tcb->snd.una,
+                              tcb->snd.recovery_pt);
+                       /* Redo retrans, but keep the sacks and recovery point */
+                       tcp_loss_event(s, tcb);
+                       tcb->snd.rtx = tcb->snd.una;
+                       tcb->snd.sack_loss_hint = 0;
+                       /* Act like an RTO.  We just detected it earlier.  This prevents us
+                        * from getting another sack hint loss this recovery period and from
+                        * advancing the opportunistic right edge. */
+                       tcb->snd.recovery = RTO_RETRANS_RECOVERY;
+                       /* We didn't actually time out yet and we expect to keep getting
+                        * sacks, so we don't want to flush or worry about in_flight.  If we
+                        * messed something up, the RTO will still fire. */
+                       set_in_flight(tcb);
+               }
+       }
+}
+
+/* Advances tcb_sack's right edge, if new_right is farther, and updates the
+ * bookkeeping due to the change. */
+static void update_right_edge(struct conv *s, Tcpctl *tcb,
+                              struct sack_block *tcb_sack, uint32_t new_right)
+{
+       if (seq_le(new_right, tcb_sack->right))
+               return;
+       tcb_sack->right = new_right;
+       merge_sacks_into(tcb, tcb_sack - tcb->snd.sacks);
+       sack_has_changed(s, tcb, tcb_sack);
+}
+
+static void update_or_insert_sack(struct conv *s, Tcpctl *tcb,
+                                  struct sack_block *seg_sack)
+{
+       struct sack_block *tcb_sack;
+
+       for (int i = 0; i < tcb->snd.nr_sacks; i++) {
+               tcb_sack = &tcb->snd.sacks[i];
+               if (seq_lt(tcb_sack->left, seg_sack->left)) {
+                       /* This includes adjacent (which I've seen!) and overlap. */
+                       if (seq_le(seg_sack->left, tcb_sack->right)) {
+                               update_right_edge(s, tcb, tcb_sack, seg_sack->right);
+                               return;
+                       }
+                       continue;
+               }
+               /* Update existing sack */
+               if (tcb_sack->left == seg_sack->left) {
+                       update_right_edge(s, tcb, tcb_sack, seg_sack->right);
+                       return;
+               }
+               /* Found our slot */
+               if (seq_gt(tcb_sack->left, seg_sack->left)) {
+                       if (tcb->snd.nr_sacks == MAX_NR_SND_SACKS) {
+                               /* Out of room, but it is possible this sack overlaps later
+                                * sacks, including the max sack's right edge. */
+                               if (seq_ge(seg_sack->right, tcb_sack->left)) {
+                                       /* Take over the sack */
+                                       tcb_sack->left = seg_sack->left;
+                                       update_right_edge(s, tcb, tcb_sack, seg_sack->right);
+                               }
+                               return;
+                       }
+                       /* O/W, it's our slot and we have room (at least one spot). */
+                       memmove(&tcb->snd.sacks[i + 1], &tcb->snd.sacks[i],
+                               sizeof(struct sack_block) * (tcb->snd.nr_sacks - i));
+                       tcb_sack->left = seg_sack->left;
+                       tcb_sack->right = seg_sack->right;
+                       tcb->snd.nr_sacks++;
+                       merge_sacks_into(tcb, i);
+                       sack_has_changed(s, tcb, tcb_sack);
+                       return;
+               }
+       }
+       if (tcb->snd.nr_sacks == MAX_NR_SND_SACKS) {
+               /* We didn't find space in the sack array. */
+               tcb_sack = &tcb->snd.sacks[MAX_NR_SND_SACKS - 1];
+               /* Need to always maintain the rightmost sack, discarding the prev */
+               if (seq_gt(seg_sack->right, tcb_sack->right)) {
+                       tcb_sack->left = seg_sack->left;
+                       tcb_sack->right = seg_sack->right;
+                       sack_has_changed(s, tcb, tcb_sack);
+               }
+               return;
+       }
+       tcb_sack = &tcb->snd.sacks[tcb->snd.nr_sacks];
+       tcb->snd.nr_sacks++;
+       tcb_sack->left = seg_sack->left;
+       tcb_sack->right = seg_sack->right;
+       sack_has_changed(s, tcb, tcb_sack);
+}
+
+/* Given the packet seg, track the sacks in TCB.  There are a few things: if seg
+ * acks new data, some sacks might no longer be needed.  Some sacks might grow,
+ * we might add new sacks, either of which can cause a merger.
+ *
+ * The important thing is that we always have the max sack entry: it must be
+ * inserted for sure and findable.  We need that for our measurement of what
+ * packets are in the network.
+ *
+ * Note that we keep sacks that are below snd.rtx (and above
+ * seg.ack/tcb->snd.una) as best we can - we don't prune them.  We'll need those
+ * for the in_flight estimate.
+ *
+ * When we run out of room, we'll have to throw away a sack.  Anything we throw
+ * away below snd.rtx will be counted as 'in flight', even though it isn't.  If
+ * we throw away something greater than snd.rtx, we'll also retrans it.  For
+ * simplicity, we throw-away / replace the rightmost sack, since we're always
+ * maintaining a highest sack. */
+static void update_sacks(struct conv *s, Tcpctl *tcb, Tcp *seg)
+{
+       int prune = 0;
+       struct sack_block *tcb_sack;
+
+       for (int i = 0; i < tcb->snd.nr_sacks; i++) {
+               tcb_sack = &tcb->snd.sacks[i];
+               /* For the equality case, if they acked up to, but not including an old
+                * sack, they must have reneged it.  Otherwise they would have acked
+                * beyond the sack. */
+               if (seq_lt(seg->ack, tcb_sack->left))
+                       break;
+               prune++;
+       }
+       if (prune) {
+               memmove(tcb->snd.sacks, tcb->snd.sacks + prune,
+                       sizeof(struct sack_block) * (tcb->snd.nr_sacks - prune));
+               tcb->snd.nr_sacks -= prune;
+       }
+       for (int i = 0; i < seg->nr_sacks; i++) {
+               /* old sacks */
+               if (seq_lt(seg->sacks[i].left, seg->ack))
+                       continue;
+               /* buggy sack: out of range */
+               if (seq_gt(seg->sacks[i].right, tcb->snd.nxt))
+                       continue;
+               update_or_insert_sack(s, tcb, &seg->sacks[i]);
+       }
+}
+
+/* This is a little bit of an underestimate, since we assume a packet is lost
+ * once we have any sacks above it.  Overall, it's at most 2 * MSS of an
+ * overestimate.
+ *
+ * If we have no sacks (either reneged or never used) we'll assume all packets
+ * above snd.rtx are lost.  This will be the case for sackless fast rxmit
+ * (Dong's stuff) or for a timeout.  In the former case, this is probably not
+ * true, and in_flight should be higher, but we have no knowledge without the
+ * sacks. */
+static void set_in_flight(Tcpctl *tcb)
+{
+       struct sack_block *tcb_sack;
+       uint32_t in_flight = 0;
+       uint32_t from;
+
+       if (!tcb->snd.nr_sacks) {
+               tcb->snd.in_flight = tcb->snd.rtx - tcb->snd.una;
+               return;
+       }
+
+       /* Everything to the right of the unsacked */
+       tcb_sack = &tcb->snd.sacks[tcb->snd.nr_sacks - 1];
+       in_flight += tcb->snd.nxt - tcb_sack->right;
+
+       /* Everything retransed (from una to snd.rtx, minus sacked regions.  Note
+        * we only retrans at most the last sack's left edge.  snd.rtx will be
+        * advanced to the right edge of some sack (possibly the last one). */
+       from = tcb->snd.una;
+       for (int i = 0; i < tcb->snd.nr_sacks; i++) {
+               tcb_sack = &tcb->snd.sacks[i];
+               if (seq_ge(tcb_sack->left, tcb->snd.rtx))
+                       break;
+               assert(seq_ge(tcb->snd.rtx, tcb_sack->right));
+               in_flight += tcb_sack->left - from;
+               from = tcb_sack->right;
+       }
+       in_flight += tcb->snd.rtx - from;
+
+       tcb->snd.in_flight = in_flight;
+}
+
+static void reset_recovery(struct conv *s, Tcpctl *tcb)
+{
+       netlog(s->p->f, Logtcprxmt,
+              "%I.%d -> %I.%d: recovery complete, una %u, rtx %u, nxt %u, recovery %u\n",
+              s->laddr, s->lport, s->raddr, s->rport,
+              tcb->snd.una, tcb->snd.rtx, tcb->snd.nxt, tcb->snd.recovery_pt);
+       tcb->snd.recovery = 0;
+       tcb->snd.recovery_pt = 0;
+       tcb->snd.loss_hint = 0;
+       tcb->snd.flush_sacks = FALSE;
+       tcb->snd.sack_loss_hint = 0;
+}
+
+static bool is_dup_ack(Tcpctl *tcb, Tcp *seg)
+{
+       /* this is a pure ack w/o window update */
+       return (seg->ack == tcb->snd.una) &&
+              (tcb->snd.una != tcb->snd.nxt) &&
+              (seg->len == 0) &&
+              (seg->wnd == tcb->snd.wnd);
+}
+
+/* If we have sacks, we'll ignore dupacks and look at the sacks ahead of una
+ * (which are managed by the TCB).  The tcb will not have old sacks (below
+ * ack/snd.rtx).  Receivers often send sacks below their ack point when we are
+ * coming out of a loss, and we don't want those to count.
+ *
+ * Note the tcb could have sacks (in the future), but the receiver stopped using
+ * them (reneged).  We'll catch that with the RTO.  If we try to catch it here,
+ * we could get in a state where we never allow them to renege. */
+static bool is_potential_loss(Tcpctl *tcb, Tcp *seg)
+{
+       if (seg->nr_sacks > 0)
+               return tcb->snd.nr_sacks > 0;
+       else
+               return is_dup_ack(tcb, seg);
+}
+
+/* When we use timestamps for RTTM, RFC 7323 suggests scaling by
+ * expected_samples (per cwnd).  They say:
+ *
+ * ExpectedSamples = ceiling(FlightSize / (SMSS * 2))
+ *
+ * However, SMSS * 2 is really "number of bytes expected to be acked in a
+ * packet.".  We'll use 'acked' to approximate that.  When the receiver uses
+ * LRO, they'll send back large ACKs, which decreases the number of samples.
+ *
+ * If it turns out that all the divides are bad, we can just go back to not
+ * using expected_samples at all. */
+static int expected_samples_ts(Tcpctl *tcb, uint32_t acked)
+{
+       assert(acked);
+       return MAX(DIV_ROUND_UP(tcb->snd.nxt - tcb->snd.una, acked), 1);
+}
+
+/* Updates the RTT, given the currently sampled RTT and the number samples per
+ * cwnd.  For non-TS RTTM, that'll be 1. */
+static void update_rtt(Tcpctl *tcb, int rtt_sample, int expected_samples)
+{
+       int delta;
+
+       tcb->backoff = 0;
+       tcb->backedoff = 0;
+       if (tcb->srtt == 0) {
+               tcb->srtt = rtt_sample;
+               tcb->mdev = rtt_sample / 2;
+       } else {
+               delta = rtt_sample - tcb->srtt;
+               tcb->srtt += (delta >> RTTM_ALPHA_SHIFT) / expected_samples;
+               if (tcb->srtt <= 0)
+                       tcb->srtt = 1;
+               tcb->mdev += ((abs(delta) - tcb->mdev) >> RTTM_BRAVO_SHIFT) /
+                            expected_samples;
+               if (tcb->mdev <= 0)
+                       tcb->mdev = 1;
+       }
+       tcpsettimer(tcb);
+}
+
+static void update(struct conv *s, Tcp *seg)
+{
+       int rtt;
+       Tcpctl *tcb;
+       uint32_t acked, expand;
+       struct tcppriv *tpriv;
+
+       tpriv = s->p->priv;
+       tcb = (Tcpctl *) s->ptcl;
+
+       if (!seq_within(seg->ack, tcb->snd.una, tcb->snd.nxt))
+               return;
+
+       acked = seg->ack - tcb->snd.una;
+       tcb->snd.una = seg->ack;
+       if (seq_gt(seg->ack, tcb->snd.rtx))
+               tcb->snd.rtx = seg->ack;
+
+       update_sacks(s, tcb, seg);
+       set_in_flight(tcb);
+
+       /* We treat either a dupack or forward SACKs as a hint that there is a loss.
+        * The RFCs suggest three dupacks before treating it as a loss (alternative
+        * is reordered packets).  We'll treat three SACKs the same way. */
+       if (is_potential_loss(tcb, seg) && !tcb->snd.recovery) {
+               tcb->snd.loss_hint++;
+               if (tcb->snd.loss_hint == TCPREXMTTHRESH) {
+                       netlog(s->p->f, Logtcprxmt,
+                              "%I.%d -> %I.%d: loss hint thresh, nr sacks %u, nxt %u, una %u, cwnd %u\n",
+                              s->laddr, s->lport, s->raddr, s->rport,
+                              tcb->snd.nr_sacks, tcb->snd.nxt, tcb->snd.una, tcb->cwind);
+                       tcp_loss_event(s, tcb);
+                       tcb->snd.recovery_pt = tcb->snd.nxt;
+                       if (tcb->snd.nr_sacks) {
+                               tcb->snd.recovery = SACK_RETRANS_RECOVERY;
+                               tcb->snd.flush_sacks = FALSE;
+                               tcb->snd.sack_loss_hint = 0;
+                       } else {
+                               tcb->snd.recovery = FAST_RETRANS_RECOVERY;
+                       }
+                       tcprxmit(s);
+               }
+       }
+
+       /*
+        *  update window
+        */
+       if (seq_gt(seg->ack, tcb->snd.wl2)
+               || (tcb->snd.wl2 == seg->ack && seg->wnd > tcb->snd.wnd)) {
+               tcb->snd.wnd = seg->wnd;
+               tcb->snd.wl2 = seg->ack;
+       }
+
+       if (!acked) {
+               /*
+                *  don't let us hangup if sending into a closed window and
+                *  we're still getting acks
+                */
+               if (tcb->snd.recovery && (tcb->snd.wnd == 0))
+                       tcb->backedoff = MAXBACKMS / 4;
+               return;
+       }
+       /* At this point, they have acked something new. (positive ack, ack > una).
+        *
+        * If we hadn't reached the threshold for recovery yet, the positive ACK
+        * will reset our loss_hint count. */
+       if (!tcb->snd.recovery)
+               tcb->snd.loss_hint = 0;
+       else if (seq_ge(seg->ack, tcb->snd.recovery_pt))
+               reset_recovery(s, tcb);
+
+       /* avoid slow start and timers for SYN acks */
+       if ((tcb->flags & SYNACK) == 0) {
+               tcb->flags |= SYNACK;
+               acked--;
+               tcb->flgcnt--;
+               goto done;
+       }
+
+       /* slow start as long as we're not recovering from lost packets */
+       if (tcb->cwind < tcb->snd.wnd && !tcb->snd.recovery) {
+               if (tcb->cwind < tcb->ssthresh) {
+                       /* We increase the cwind by every byte we receive.  We want to
+                        * increase the cwind by one MSS for every MSS that gets ACKed.
+                        * Note that multiple MSSs can be ACKed in a single ACK.  If we had
+                        * a remainder of acked / MSS, we'd add just that remainder - not 0
+                        * or 1 MSS. */
+                       expand = acked;
+               } else {
+                       /* Every RTT, which consists of CWND bytes, we're supposed to expand
+                        * by MSS bytes.  The classic algorithm was
+                        *              expand = (tcb->mss * tcb->mss) / tcb->cwind;
+                        * which assumes the ACK was for MSS bytes.  Instead, for every
+                        * 'acked' bytes, we increase the window by acked / CWND (in units
+                        * of MSS). */
+                       expand = MAX(acked, tcb->typical_mss) * tcb->typical_mss
+                                / tcb->cwind;
+               }
+
+               if (tcb->cwind + expand < tcb->cwind)
+                       expand = tcb->snd.wnd - tcb->cwind;
+               if (tcb->cwind + expand > tcb->snd.wnd)
+                       expand = tcb->snd.wnd - tcb->cwind;
+               tcb->cwind += expand;
+       }
+       adjust_tx_qio_limit(s);
+
+       if (tcb->ts_recent) {
+               update_rtt(tcb, abs(milliseconds() - seg->ts_ecr),
+                          expected_samples_ts(tcb, acked));
+       } else if (tcb->rtt_timer.state == TcptimerON &&
+                  seq_ge(seg->ack, tcb->rttseq)) {
+               /* Adjust the timers according to the round trip time */
+               tcphalt(tpriv, &tcb->rtt_timer);
+               if (!tcb->snd.recovery) {
+                       rtt = tcb->rtt_timer.start - tcb->rtt_timer.count;
+                       if (rtt == 0)
+                               rtt = 1;        /* o/w all close systems will rexmit in 0 time */
+                       rtt *= MSPTICK;
+                       update_rtt(tcb, rtt, 1);
+               }
+       }
+
+done:
+       if (qdiscard(s->wq, acked) < acked) {
+               tcb->flgcnt--;
+               /* This happened due to another bug where acked was very large
+                * (negative), which was interpreted as "hey, one less flag, since they
+                * acked one of our flags (like a SYN).  If flgcnt goes negative,
+                * get_xmit_segment() will attempt to send out large packets. */
+               assert(tcb->flgcnt >= 0);
+       }
+
+       if (seq_gt(seg->ack, tcb->snd.urg))
+               tcb->snd.urg = seg->ack;
+
+       if (tcb->snd.una != tcb->snd.nxt)
+               tcpgo(tpriv, &tcb->timer);
+       else
+               tcphalt(tpriv, &tcb->timer);
+
+       tcb->backoff = 0;
+       tcb->backedoff = 0;
+}
+
+static void update_tcb_ts(Tcpctl *tcb, Tcp *seg)
+{
+       /* Get timestamp info from the tcp header.  Even though the timestamps
+        * aren't sequence numbers, we still need to protect for wraparound.  Though
+        * if the values were 0, assume that means we need an update.  We could have
+        * an initial ts_val that appears negative (signed). */
+       if (!tcb->ts_recent || !tcb->last_ack_sent ||
+           (seq_ge(seg->ts_val, tcb->ts_recent) &&
+            seq_le(seg->seq, tcb->last_ack_sent)))
+               tcb->ts_recent = seg->ts_val;
+}
+
+/* Overlap happens when one sack's left edge is inside another sack. */
+static bool sacks_overlap(struct sack_block *x, struct sack_block *y)
+{
+       return (seq_le(x->left, y->left) && seq_le(y->left, x->right)) ||
+              (seq_le(y->left, x->left) && seq_le(x->left, y->right));
+}
+
+static void make_sack_first(Tcpctl *tcb, struct sack_block *tcb_sack)
+{
+       struct sack_block temp;
+
+       if (tcb_sack == &tcb->rcv.sacks[0])
+               return;
+       temp = tcb->rcv.sacks[0];
+       tcb->rcv.sacks[0] = *tcb_sack;
+       *tcb_sack = temp;
+}
+
/* Track sack in our tcb for a block of data we received.  This handles all the
 * stuff: making sure sack is first (since it's the most recent sack change),
 * updating or merging sacks, and dropping excess sacks (we only need to
 * maintain 3).  Unlike on the snd side, our tcb sacks are *not* sorted.
 *
 * @left and @right are sequence numbers delimiting the received block;
 * [left, right) with right exclusive, per the serial assert below. */
static void track_rcv_sack(Tcpctl *tcb, uint32_t left, uint32_t right)
{
	struct sack_block *tcb_sack;
	struct sack_block sack[1];

	/* Only track sacks if the peer negotiated the option. */
	if (!tcb->sack_ok)
		return;
	/* Empty block: nothing to report. */
	if (left == right)
		return;
	assert(seq_lt(left, right));
	sack->left = left;
	sack->right = right;
	/* We can reuse an existing sack if we're merging or overlapping. */
	for (int i = 0; i < tcb->rcv.nr_sacks; i++) {
		tcb_sack = &tcb->rcv.sacks[i];
		if (sacks_overlap(tcb_sack, sack)) {
			/* Grow the existing sack to cover the union, then
			 * promote it to slot 0 (most recent change). */
			tcb_sack->left = seq_min(tcb_sack->left, sack->left);
			tcb_sack->right = seq_max(tcb_sack->right, sack->right);
			make_sack_first(tcb, tcb_sack);
			return;
		}
	}
	/* We can discard the last sack (right shift) - we should have sent it at
	 * least once by now.  If not, oh well. */
	memmove(tcb->rcv.sacks + 1, tcb->rcv.sacks, sizeof(struct sack_block) *
		MIN(MAX_NR_RCV_SACKS - 1, tcb->rcv.nr_sacks));
	tcb->rcv.sacks[0] = *sack;
	if (tcb->rcv.nr_sacks < MAX_NR_RCV_SACKS)
		tcb->rcv.nr_sacks++;
}
+
+/* Once we receive everything and move rcv.nxt past a sack, we don't need to
+ * track it.  I've seen Linux report sacks in the past, but we probably
+ * shouldn't. */
+static void drop_old_rcv_sacks(Tcpctl *tcb)
+{
+       struct sack_block *tcb_sack;
+
+       for (int i = 0; i < tcb->rcv.nr_sacks; i++) {
+               tcb_sack = &tcb->rcv.sacks[i];
+               /* Moving up to or past the left is enough to drop it. */
+               if (seq_ge(tcb->rcv.nxt, tcb_sack->left)) {
+                       memmove(tcb->rcv.sacks + i, tcb->rcv.sacks + i + 1,
+                               sizeof(struct sack_block) * (tcb->rcv.nr_sacks - i - 1));
+                       tcb->rcv.nr_sacks--;
+                       i--;
+               }
+       }
+}
+
/* Protocol input routine.  Validates and checksums an incoming segment (v4 or
 * v6), trims it to the datagram's claimed size, finds the matching
 * conversation, and then runs the TCP input state machine (RFC 793 style)
 * with the conversation's qlock held.  Out-of-band/urgent data is mostly
 * ignored.  On unrecoverable per-segment errors we bump stats and drop; with
 * no matching conversation we answer with a RST. */
static void tcpiput(struct Proto *tcp, struct Ipifc *unused, struct block *bp)
{
	ERRSTACK(1);
	Tcp seg;
	Tcp4hdr *h4;
	Tcp6hdr *h6;
	int hdrlen;
	Tcpctl *tcb;
	uint16_t length;
	uint8_t source[IPaddrlen], dest[IPaddrlen];
	struct conv *s;
	struct Fs *f;
	struct tcppriv *tpriv;
	uint8_t version;

	f = tcp->f;
	tpriv = tcp->priv;

	tpriv->stats[InSegs]++;

	/* Both header views alias the same read pointer; the IP version
	 * nibble below picks which one is valid. */
	h4 = (Tcp4hdr *) (bp->rp);
	h6 = (Tcp6hdr *) (bp->rp);

	if ((h4->vihl & 0xF0) == IP_VER4) {
		uint8_t ttl;

		version = V4;
		length = nhgets(h4->length);
		v4tov6(dest, h4->tcpdst);
		v4tov6(source, h4->tcpsrc);

		/* ttl isn't part of the xsum pseudo header, but bypass needs it. */
		ttl = h4->Unused;
		h4->Unused = 0;
		hnputs(h4->tcplen, length - TCP4_PKT);
		/* Skip the software checksum if the NIC already verified it
		 * (Btcpck) or the sender sent a zero checksum. */
		if (!(bp->flag & Btcpck) && (h4->tcpcksum[0] || h4->tcpcksum[1]) &&
			ptclcsum(bp, TCP4_IPLEN, length - TCP4_IPLEN)) {
			tpriv->stats[CsumErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "bad tcp proto cksum\n");
			freeblist(bp);
			return;
		}
		h4->Unused = ttl;

		hdrlen = ntohtcp4(&seg, &bp);
		if (hdrlen < 0) {
			tpriv->stats[HlenErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "bad tcp hdr len\n");
			return;
		}

		s = iphtlook(&tpriv->ht, source, seg.source, dest, seg.dest);
		if (s && s->state == Bypass) {
			bypass_or_drop(s, bp);
			return;
		}

		/* trim the packet to the size claimed by the datagram */
		length -= hdrlen + TCP4_PKT;
		bp = trimblock(bp, hdrlen + TCP4_PKT, length);
		if (bp == NULL) {
			tpriv->stats[LenErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "tcp len < 0 after trim\n");
			return;
		}
	} else {
		int ttl = h6->ttl;
		int proto = h6->proto;

		version = V6;
		length = nhgets(h6->ploadlen);
		ipmove(dest, h6->tcpdst);
		ipmove(source, h6->tcpsrc);

		/* Build the v6 xsum pseudo-header in place: the ttl slot
		 * temporarily carries the proto; both are restored below. */
		h6->ploadlen[0] = h6->ploadlen[1] = h6->proto = 0;
		h6->ttl = proto;
		hnputl(h6->vcf, length);
		if ((h6->tcpcksum[0] || h6->tcpcksum[1]) &&
			ptclcsum(bp, TCP6_IPLEN, length + TCP6_PHDRSIZE)) {
			tpriv->stats[CsumErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "bad tcp proto cksum\n");
			freeblist(bp);
			return;
		}
		h6->ttl = ttl;
		h6->proto = proto;
		hnputs(h6->ploadlen, length);

		hdrlen = ntohtcp6(&seg, &bp);
		if (hdrlen < 0) {
			tpriv->stats[HlenErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "bad tcp hdr len\n");
			return;
		}

		s = iphtlook(&tpriv->ht, source, seg.source, dest, seg.dest);
		if (s && s->state == Bypass) {
			bypass_or_drop(s, bp);
			return;
		}

		/* trim the packet to the size claimed by the datagram */
		length -= hdrlen;
		bp = trimblock(bp, hdrlen + TCP6_PKT, length);
		if (bp == NULL) {
			tpriv->stats[LenErrs]++;
			tpriv->stats[InErrs]++;
			netlog(f, Logtcp, "tcp len < 0 after trim\n");
			return;
		}
	}

	/* s, the conv matching the n-tuple, was set above */
	if (s == NULL) {
		netlog(f, Logtcpreset, "iphtlook failed: src %I:%u, dst %I:%u\n",
		       source, seg.source, dest, seg.dest);
reset:
		sndrst(tcp, source, dest, length, &seg, version, "no conversation");
		freeblist(bp);
		return;
	}

	/* lock protocol for unstate Plan 9 invariants.  funcs like limbo or
	 * incoming might rely on it. */
	qlock(&tcp->qlock);

	/* if it's a listener, look for the right flags and get a new conv */
	tcb = (Tcpctl *) s->ptcl;
	if (tcb->state == Listen) {
		if (seg.flags & RST) {
			limborst(s, &seg, source, dest, version);
			qunlock(&tcp->qlock);
			freeblist(bp);
			return;
		}

		/* if this is a new SYN, put the call into limbo */
		if ((seg.flags & SYN) && (seg.flags & ACK) == 0) {
			limbo(s, source, dest, &seg, version);
			qunlock(&tcp->qlock);
			freeblist(bp);
			return;
		}

		/* if there's a matching call in limbo, tcpincoming will return it */
		s = tcpincoming(s, &seg, source, dest, version);
		if (s == NULL) {
			qunlock(&tcp->qlock);
			goto reset;
		}
	}

	/* The rest of the input state machine is run with the control block
	 * locked and implements the state machine directly out of the RFC.
	 * Out-of-band data is ignored - it was always a bad idea.
	 */
	tcb = (Tcpctl *) s->ptcl;
	if (waserror()) {
		qunlock(&s->qlock);
		nexterror();
	}
	qlock(&s->qlock);
	qunlock(&tcp->qlock);

	update_tcb_ts(tcb, &seg);
	/* fix up window */
	seg.wnd <<= tcb->rcv.scale;

	/* every input packet in puts off the keep alive time out */
	tcpsetkacounter(tcb);

	/* Pre-loop states: Closed and Syn_sent are fully handled here;
	 * all other states fall out of the switch into the main loop. */
	switch (tcb->state) {
		case Closed:
			sndrst(tcp, source, dest, length, &seg, version,
				   "sending to Closed");
			goto raise;
		case Syn_sent:
			if (seg.flags & ACK) {
				if (!seq_within(seg.ack, tcb->iss + 1, tcb->snd.nxt)) {
					sndrst(tcp, source, dest, length, &seg, version,
						   "bad seq in Syn_sent");
					goto raise;
				}
			}
			if (seg.flags & RST) {
				if (seg.flags & ACK)
					localclose(s, "connection refused");
				goto raise;
			}

			if (seg.flags & SYN) {
				procsyn(s, &seg);
				if (seg.flags & ACK) {
					update(s, &seg);
					tcpsynackrtt(s);
					tcpsetstate(s, Established);
					/* Here's where we get the results of header option
					 * negotiations for connections we started. (SYNACK has the
					 * response) */
					tcpsetscale(s, tcb, seg.ws, tcb->scale);
					tcb->sack_ok = seg.sack_ok;
				} else {
					sndrst(tcp, source, dest, length, &seg, version,
						   "Got SYN with no ACK");
					goto raise;
				}

				/* Data or FIN on the SYNACK: process it in the
				 * main loop below. */
				if (length != 0 || (seg.flags & FIN))
					break;

				freeblist(bp);
				goto output;
			} else
				freeblist(bp);

			qunlock(&s->qlock);
			poperror();
			return;
	}

	/*
	 *  One DOS attack is to open connections to us and then forget about them,
	 *  thereby tying up a conv at no long term cost to the attacker.
	 *  This is an attempt to defeat these stateless DOS attacks.  See
	 *  corresponding code in tcpsendka().
	 */
	if ((seg.flags & RST) == 0) {
		if (tcpporthogdefense
			&& seq_within(seg.ack, tcb->snd.una - (1 << 31),
						  tcb->snd.una - (1 << 29))) {
			printd("stateless hog %I.%d->%I.%d f 0x%x 0x%lx - 0x%lx - 0x%lx\n",
				   source, seg.source, dest, seg.dest, seg.flags,
				   tcb->snd.una - (1 << 31), seg.ack, tcb->snd.una - (1 << 29));
			localclose(s, "stateless hog");
		}
	}

	/* Cut the data to fit the receive window */
	if (tcptrim(tcb, &seg, &bp, &length) == -1) {
		netlog(f, Logtcp, "%I.%d -> %I.%d: tcp len < 0, %lu %d\n",
		       s->raddr, s->rport, s->laddr, s->lport, seg.seq, length);
		update(s, &seg);
		if (qlen(s->wq) + tcb->flgcnt == 0 && tcb->state == Closing) {
			tcphalt(tpriv, &tcb->rtt_timer);
			tcphalt(tpriv, &tcb->acktimer);
			tcphalt(tpriv, &tcb->katimer);
			tcpsetstate(s, Time_wait);
			tcb->timer.start = MSL2 * (1000 / MSPTICK);
			tcpgo(tpriv, &tcb->timer);
		}
		if (!(seg.flags & RST)) {
			/* out-of-window but not a RST: force an ACK so the
			 * peer learns our current window/seq state */
			tcb->flags |= FORCE;
			goto output;
		}
		qunlock(&s->qlock);
		poperror();
		return;
	}

	/* Cannot accept so answer with a rst */
	if (length && tcb->state == Closed) {
		sndrst(tcp, source, dest, length, &seg, version, "sending to Closed");
		goto raise;
	}

	/* The segment is beyond the current receive pointer so
	 * queue the data in the resequence queue
	 */
	if (seg.seq != tcb->rcv.nxt)
		if (length != 0 || (seg.flags & (SYN | FIN))) {
			update(s, &seg);
			if (addreseq(tcb, tpriv, &seg, bp, length) < 0)
				printd("reseq %I.%d -> %I.%d\n", s->raddr, s->rport, s->laddr,
					   s->lport);
			tcb->flags |= FORCE;
			goto output;
		}

	/*
	 *  keep looping till we've processed this packet plus any
	 *  adjacent packets in the resequence queue
	 */
	for (;;) {
		if (seg.flags & RST) {
			if (tcb->state == Established) {
				tpriv->stats[EstabResets]++;
				if (tcb->rcv.nxt != seg.seq)
					printd
						("out of order RST rcvd: %I.%d -> %I.%d, rcv.nxt 0x%lx seq 0x%lx\n",
						 s->raddr, s->rport, s->laddr, s->lport, tcb->rcv.nxt,
						 seg.seq);
			}
			localclose(s, "connection refused");
			goto raise;
		}

		/* Every segment past Syn_sent must carry an ACK. */
		if ((seg.flags & ACK) == 0)
			goto raise;

		switch (tcb->state) {
			case Established:
			case Close_wait:
				update(s, &seg);
				break;
			case Finwait1:
				update(s, &seg);
				if (qlen(s->wq) + tcb->flgcnt == 0) {
					tcphalt(tpriv, &tcb->rtt_timer);
					tcphalt(tpriv, &tcb->acktimer);
					tcpsetkacounter(tcb);
					tcb->time = NOW;
					tcpsetstate(s, Finwait2);
					tcb->katimer.start = MSL2 * (1000 / MSPTICK);
					tcpgo(tpriv, &tcb->katimer);
				}
				break;
			case Finwait2:
				update(s, &seg);
				break;
			case Closing:
				update(s, &seg);
				if (qlen(s->wq) + tcb->flgcnt == 0) {
					tcphalt(tpriv, &tcb->rtt_timer);
					tcphalt(tpriv, &tcb->acktimer);
					tcphalt(tpriv, &tcb->katimer);
					tcpsetstate(s, Time_wait);
					tcb->timer.start = MSL2 * (1000 / MSPTICK);
					tcpgo(tpriv, &tcb->timer);
				}
				break;
			case Last_ack:
				update(s, &seg);
				if (qlen(s->wq) + tcb->flgcnt == 0) {
					localclose(s, NULL);
					goto raise;
				}
				/* fall through: not everything acked yet, so
				 * keep the timer running like Time_wait */
			case Time_wait:
				tcb->flags |= FORCE;
				if (tcb->timer.state != TcptimerON)
					tcpgo(tpriv, &tcb->timer);
		}

		if ((seg.flags & URG) && seg.urg) {
			if (seq_gt(seg.urg + seg.seq, tcb->rcv.urg)) {
				tcb->rcv.urg = seg.urg + seg.seq;
				pullblock(&bp, seg.urg);
			}
		} else if (seq_gt(tcb->rcv.nxt, tcb->rcv.urg))
			tcb->rcv.urg = tcb->rcv.nxt;

		if (length == 0) {
			if (bp != NULL)
				freeblist(bp);
		} else {
			switch (tcb->state) {
				default:
					/* Ignore segment text */
					if (bp != NULL)
						freeblist(bp);
					break;

				case Established:
				case Finwait1:
					/* If we still have some data place on
					 * receive queue
					 */
					if (bp) {
						bp = packblock(bp);
						if (bp == NULL)
							panic("tcp packblock");
						qpassnolim(s->rq, bp);
						bp = NULL;

						/*
						 *  Force an ack every 2 data messages.  This is
						 *  a hack for rob to make his home system run
						 *  faster.
						 *
						 *  this also keeps the standard TCP congestion
						 *  control working since it needs an ack every
						 *  2 max segs worth.  This is not quite that,
						 *  but under a real stream is equivalent since
						 *  every packet has a max seg in it.
						 */
						if (++(tcb->rcv.una) >= 2)
							tcb->flags |= FORCE;
					}
					tcb->rcv.nxt += length;
					drop_old_rcv_sacks(tcb);

					/*
					 *  update our rcv window
					 */
					tcprcvwin(s);

					/*
					 *  turn on the acktimer if there's something
					 *  to ack
					 */
					if (tcb->acktimer.state != TcptimerON)
						tcpgo(tpriv, &tcb->acktimer);

					break;
				case Finwait2:
					/* no process to read the data, send a reset */
					if (bp != NULL)
						freeblist(bp);
					sndrst(tcp, source, dest, length, &seg, version,
						   "send to Finwait2");
					qunlock(&s->qlock);
					poperror();
					return;
			}
		}

		if (seg.flags & FIN) {
			tcb->flags |= FORCE;

			switch (tcb->state) {
				case Established:
					tcb->rcv.nxt++;
					tcpsetstate(s, Close_wait);
					break;
				case Finwait1:
					tcb->rcv.nxt++;
					if (qlen(s->wq) + tcb->flgcnt == 0) {
						tcphalt(tpriv, &tcb->rtt_timer);
						tcphalt(tpriv, &tcb->acktimer);
						tcphalt(tpriv, &tcb->katimer);
						tcpsetstate(s, Time_wait);
						tcb->timer.start = MSL2 * (1000 / MSPTICK);
						tcpgo(tpriv, &tcb->timer);
					} else
						tcpsetstate(s, Closing);
					break;
				case Finwait2:
					tcb->rcv.nxt++;
					tcphalt(tpriv, &tcb->rtt_timer);
					tcphalt(tpriv, &tcb->acktimer);
					tcphalt(tpriv, &tcb->katimer);
					tcpsetstate(s, Time_wait);
					tcb->timer.start = MSL2 * (1000 / MSPTICK);
					tcpgo(tpriv, &tcb->timer);
					break;
				case Close_wait:
				case Closing:
				case Last_ack:
					break;
				case Time_wait:
					/* restart the 2MSL timer on a re-received FIN */
					tcpgo(tpriv, &tcb->timer);
					break;
			}
		}

		/*
		 *  get next adjacent segment from the resequence queue.
		 *  dump/trim any overlapping segments
		 */
		for (;;) {
			if (tcb->reseq == NULL)
				goto output;

			if (seq_ge(tcb->rcv.nxt, tcb->reseq->seg.seq) == 0)
				goto output;

			getreseq(tcb, &seg, &bp, &length);

			if (tcptrim(tcb, &seg, &bp, &length) == 0)
				break;
		}
	}
output:
	tcpoutput(s);
	qunlock(&s->qlock);
	poperror();
	return;
raise:
	qunlock(&s->qlock);
	poperror();
	freeblist(bp);
	tcpkick(s);
}
+
+/* The advertised mss = data + TCP headers */
+static uint16_t derive_payload_mss(Tcpctl *tcb)
+{
+       uint16_t payload_mss = tcb->mss;
+       uint16_t opt_size = 0;
+
+       if (tcb->ts_recent) {
+               opt_size += TS_LENGTH;
+               /* Note that when we're a SYN, we overestimate slightly.  This is safe,
+                * and not really a problem. */
+               opt_size += TS_SEND_PREPAD;
+       }
+       if (tcb->rcv.nr_sacks)
+               opt_size += 2 + tcb->rcv.nr_sacks * 8;
+       opt_size = ROUNDUP(opt_size, 4);
+       payload_mss -= opt_size;
+       return payload_mss;
+}
+
+/* Decreases the xmit amt, given the MSS / TSO. */
+static uint32_t throttle_for_mss(Tcpctl *tcb, uint32_t ssize,
+                                 uint16_t payload_mss, bool retrans)
+{
+       if (ssize > payload_mss) {
+               if ((tcb->flags & TSO) == 0) {
+                       ssize = payload_mss;
+               } else {
+                       /* Don't send too much.  32K is arbitrary.. */
+                       if (ssize > 32 * 1024)
+                               ssize = 32 * 1024;
+                       if (!retrans) {
+                               /* Clamp xmit to an integral MSS to avoid ragged tail segments
+                                * causing poor link utilization. */
+                               ssize = ROUNDDOWN(ssize, payload_mss);
+                       }
+               }
+       }
+       return ssize;
+}
+
+/* Reduces ssize for a variety of reasons.  Returns FALSE if we should abort
+ * sending the packet.  o/w returns TRUE and modifies ssize by reference. */
+static bool throttle_ssize(struct conv *s, Tcpctl *tcb, uint32_t *ssize_p,
+                           uint16_t payload_mss, bool retrans)
+{
+       struct Fs *f = s->p->f;
+       uint32_t usable;
+       uint32_t ssize = *ssize_p;
+
+       /* Compute usable segment based on offered window and limit
+        * window probes to one */
+       if (tcb->snd.wnd == 0) {
+               if (tcb->snd.in_flight != 0) {
+                       if ((tcb->flags & FORCE) == 0)
+                               return FALSE;
+               }
+               usable = 1;
+       } else {
+               usable = tcb->cwind;
+               if (tcb->snd.wnd < usable)
+                       usable = tcb->snd.wnd;
+               if (usable > tcb->snd.in_flight)
+                       usable -= tcb->snd.in_flight;
+               else
+                       usable = 0;
+               /* Avoid Silly Window Syndrome.  This is a little different thant RFC
+                * 813.  I took their additional enhancement of "< MSS" as an AND, not
+                * an OR.  25% of a large snd.wnd is pretty large, and our main goal is
+                * to avoid packets smaller than MSS.  I still use the 25% threshold,
+                * because it is important that there is *some* data in_flight.  If
+                * usable < MSS because snd.wnd is very small (but not 0), we might
+                * never get an ACK and would need to set up a timer.
+                *
+                * Also, I'm using 'ssize' as a proxy for a PSH point.  If there's just
+                * a small blob in the qio (or retrans!), then we might as well just
+                * send it. */
+               if ((usable < tcb->typical_mss) && (usable < tcb->snd.wnd >> 2)
+                   && (usable < ssize)) {
+                       return FALSE;
+               }
+       }
+       if (ssize && usable < 2)
+               netlog(s->p->f, Logtcpverbose,
+                      "%I.%d -> %I.%d: throttled snd.wnd %lu cwind %lu\n",
+                      s->laddr, s->lport, s->raddr, s->rport,
+                      tcb->snd.wnd, tcb->cwind);
+       if (usable < ssize)
+               ssize = usable;
+
+       ssize = throttle_for_mss(tcb, ssize, payload_mss, retrans);
+
+       *ssize_p = ssize;
+       return TRUE;
+}
+
+/* Helper, picks the next segment to send, which is possibly a retransmission.
+ * Returns TRUE if we have a segment, FALSE o/w.  Returns ssize, from_seq, and
+ * sent by reference.
+ *
+ * from_seq is the seq number we are transmitting from.
+ *
+ * sent includes all seq from una to from_seq *including* any previously sent
+ * flags (part of tcb->flgcnt), for instance an unacknowledged SYN (which counts
+ * as a seq number).  Those flags are in the e.g. snd.nxt - snd.una range, and
+ * they get dropped after qdiscard.
+ *
+ * ssize is the amount of data we are sending, starting from from_seq, and it
+ * will include any *new* flags, which haven't been accounted for yet.
+ *
+ * tcb->flgcnt consists of the flags both in ssize and in sent.
+ *
+ * Note that we could be in recovery and not sack_retrans a segment. */
+static bool get_xmit_segment(struct conv *s, Tcpctl *tcb, uint16_t payload_mss,
+                             uint32_t *from_seq_p, uint32_t *sent_p,
+                             uint32_t *ssize_p)
+{
+	struct Fs *f = s->p->f;
+	struct tcppriv *tpriv = s->p->priv;
+	uint32_t ssize, sent, from_seq;
+	bool sack_retrans = FALSE;
+	struct sack_block *tcb_sack = 0;
+
+	/* First preference: retransmit into the lowest sack hole (data below a
+	 * sacked block's left edge) that snd.rtx hasn't covered yet. */
+	for (int i = 0; i < tcb->snd.nr_sacks; i++) {
+		tcb_sack = &tcb->snd.sacks[i];
+		if (seq_lt(tcb->snd.rtx, tcb_sack->left)) {
+			/* So ssize is supposed to include any *new* flags to flgcnt, which
+			 * at this point would be a FIN.
+			 *
+			 * It might be possible that flgcnt is incremented so we send a FIN,
+			 * even for an intermediate sack retrans.  Perhaps the user closed
+			 * the conv.
+			 *
+			 * However, the way the "flgcnt for FIN" works is that it inflates
+			 * the desired amount we'd like to send (qlen + flgcnt).
+			 * Eventually, we reach the end of the queue and fail to extract all
+			 * of dsize.  At that point, we put on the FIN, and that's where the
+			 * extra 'byte' comes from.
+			 *
+			 * For sack retrans, since we're extracting from parts of the qio
+			 * that aren't the right-most edge, we don't need to consider flgcnt
+			 * when setting ssize. */
+			from_seq = tcb->snd.rtx;
+			sent = from_seq - tcb->snd.una;
+			ssize = tcb_sack->left - from_seq;
+			sack_retrans = TRUE;
+			break;
+		}
+	}
+	/* SACK holes have first dibs, but we can still opportunisitically send new
+	 * data.
+	 *
+	 * During other types of recovery, we'll just send from the retrans point.
+	 * If we're in an RTO while we still have sacks, we could be resending data
+	 * that wasn't lost.  Consider a sack that is still growing (usually the
+	 * right-most), but we haven't received the ACK yet.  rxt may be included in
+	 * that area.  Given we had two losses or otherwise timed out, I'm not too
+	 * concerned.
+	 *
+	 * Note that Fast and RTO can send data beyond nxt.  If we change that,
+	 * change the accounting below. */
+	if (!sack_retrans) {
+		switch (tcb->snd.recovery) {
+		default:
+		case SACK_RETRANS_RECOVERY:
+			from_seq = tcb->snd.nxt;
+			break;
+		case FAST_RETRANS_RECOVERY:
+		case RTO_RETRANS_RECOVERY:
+			from_seq = tcb->snd.rtx;
+			break;
+		}
+		sent = from_seq - tcb->snd.una;
+		/* qlen + flgcnt is every seq we want to have sent, including unack'd
+		 * data, unacked flags, and new flags. */
+		ssize = qlen(s->wq) + tcb->flgcnt - sent;
+	}
+
+	/* throttle_ssize caps ssize per cwnd/rwnd/mss policy; FALSE means don't
+	 * send anything at all this round. */
+	if (!throttle_ssize(s, tcb, &ssize, payload_mss, sack_retrans))
+		return FALSE;
+
+	/* This counts flags, which is a little hokey, but it's okay since in_flight
+	 * gets reset on each ACK */
+	tcb->snd.in_flight += ssize;
+	/* Log and track rxmit.  This covers both SACK (retrans) and fast rxmit. */
+	if (ssize && seq_lt(tcb->snd.rtx, tcb->snd.nxt)) {
+		netlog(f, Logtcpverbose,
+		       "%I.%d -> %I.%d: rxmit: rtx %u amt %u, nxt %u\n",
+		       s->laddr, s->lport, s->raddr, s->rport,
+		       tcb->snd.rtx, MIN(tcb->snd.nxt - tcb->snd.rtx, ssize),
+		       tcb->snd.nxt);
+		tpriv->stats[RetransSegs]++;
+	}
+	if (sack_retrans) {
+		/* If we'll send up to the left edge, advance snd.rtx to the right.
+		 *
+		 * This includes the largest sack.  It might get removed later, in which
+		 * case we'll underestimate the amount in-flight.  The alternative is to
+		 * not count the rightmost sack, but when it gets removed, we'll retrans
+		 * it anyway.  No matter what, we'd count it. */
+		tcb->snd.rtx += ssize;
+		if (tcb->snd.rtx == tcb_sack->left)
+			tcb->snd.rtx = tcb_sack->right;
+		/* RFC 6675 says we MAY rearm the RTO timer on each retrans, since we
+		 * might not be getting ACKs for a while. */
+		tcpsettimer(tcb);
+	} else {
+		switch (tcb->snd.recovery) {
+		default:
+			/* under normal op, we drag rtx along with nxt.  this prevents us
+			 * from sending sacks too early (up above), since rtx doesn't get
+			 * reset to una until we have a loss (e.g. 3 dupacks/sacks). */
+			tcb->snd.nxt += ssize;
+			tcb->snd.rtx = tcb->snd.nxt;
+			break;
+		case SACK_RETRANS_RECOVERY:
+			/* We explicitly do not want to increase rtx here.  We might still
+			 * need it to fill in a sack gap below nxt if we get new, higher
+			 * sacks. */
+			tcb->snd.nxt += ssize;
+			break;
+		case FAST_RETRANS_RECOVERY:
+		case RTO_RETRANS_RECOVERY:
+			tcb->snd.rtx += ssize;
+			/* Fast and RTO can send new data, advancing nxt. */
+			if (seq_gt(tcb->snd.rtx, tcb->snd.nxt))
+				tcb->snd.nxt = tcb->snd.rtx;
+			break;
+		}
+	}
+	*from_seq_p = from_seq;
+	*sent_p = sent;
+	*ssize_p = ssize;
+
+	return TRUE;
+}
+
+/*
+ *  always enters and exits with the s locked.  We drop
+ *  the lock to ipoput the packet so some care has to be
+ *  taken by callers.
  */
-s16_t tcp_pcbs_sane(void)
-{
-  struct tcp_pcb *pcb;
-  for(pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) {
-    LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED);
-    LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN);
-    LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT);
-  }
-  for(pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) {
-    LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT);
-  }
-  return 1;
-}
-#endif /* TCP_DEBUG */
+static void tcpoutput(struct conv *s)
+{
+	Tcp seg;
+	int msgs;
+	int next_yield = 1;
+	Tcpctl *tcb;
+	struct block *hbp, *bp;
+	uint32_t ssize, dsize, sent, from_seq;
+	struct Fs *f;
+	struct tcppriv *tpriv;
+	uint8_t version;
+	uint16_t payload_mss;
+
+	f = s->p->f;
+	tpriv = s->p->priv;
+	version = s->ipversion;
+
+	/* Bounded loop so one conversation can't hog the kthread; we also yield
+	 * every few messages (see the bottom of the loop). */
+	for (msgs = 0; msgs < 100; msgs++) {
+		tcb = (Tcpctl *) s->ptcl;
+
+		switch (tcb->state) {
+			case Listen:
+			case Closed:
+			case Finwait2:
+				return;
+		}
+
+		/* force an ack when a window has opened up */
+		if (tcb->rcv.blocked && tcb->rcv.wnd >= tcb->mss) {
+			tcb->rcv.blocked = 0;
+			tcb->flags |= FORCE;
+		}
+
+		/* Don't send anything else until our SYN has been acked */
+		if (tcb->snd.nxt != tcb->iss && (tcb->flags & SYNACK) == 0)
+			break;
+
+		/* payload_mss is the actual amount of data in the packet, which is the
+		 * advertised (mss - header opts).  This varies from packet to packet,
+		 * based on the options that might be present (e.g. always timestamps,
+		 * sometimes SACKs) */
+		payload_mss = derive_payload_mss(tcb);
+
+		if (!get_xmit_segment(s, tcb, payload_mss, &from_seq, &sent, &ssize))
+			break;
+
+		dsize = ssize;
+		seg.urg = 0;
+
+		/* Nothing to send and no forced output (e.g. pure ack or window
+		 * update): we're done. */
+		if (ssize == 0)
+			if ((tcb->flags & FORCE) == 0)
+				break;
+
+		tcb->flags &= ~FORCE;
+		tcprcvwin(s);
+
+		/* By default we will generate an ack, so we can normally turn off the
+		 * timer.  If we're blocked, we'll want the timer so we can send a
+		 * window update. */
+		if (!tcb->rcv.blocked)
+			tcphalt(tpriv, &tcb->acktimer);
+		tcb->rcv.una = 0;
+		seg.source = s->lport;
+		seg.dest = s->rport;
+		seg.flags = ACK;
+		seg.mss = 0;
+		seg.ws = 0;
+		seg.sack_ok = FALSE;
+		seg.nr_sacks = 0;
+		/* When outputting, Syn_sent means "send the Syn", for connections we
+		 * initiate.  SYNACKs are sent from sndsynack directly. */
+		if (tcb->state == Syn_sent) {
+			seg.flags = 0;
+			seg.sack_ok = SACK_SUPPORTED;	/* here's where we advertise SACK */
+			if (tcb->snd.nxt - ssize == tcb->iss) {
+				seg.flags |= SYN;
+				/* The SYN consumes a sequence number but no payload byte. */
+				dsize--;
+				seg.mss = tcb->mss;
+				seg.ws = tcb->scale;
+			} else {
+				/* TODO: Not sure why we'd get here. */
+				warn("TCP: weird Syn_sent state, tell someone you saw this");
+			}
+		}
+		seg.seq = from_seq;
+		seg.ack = tcb->rcv.nxt;
+		tcb->last_ack_sent = seg.ack;
+		seg.wnd = tcb->rcv.wnd;
+		seg.ts_val = tcb->ts_recent;
+
+		/* Pull out data to send */
+		bp = NULL;
+		if (dsize != 0) {
+			bp = qcopy(s->wq, dsize, sent);
+			if (BLEN(bp) != dsize) {
+				/* Here's where the flgcnt kicked in.  Note dsize is
+				 * decremented, but ssize isn't.  Not that we use ssize for much
+				 * anymore.  Decrementing dsize prevents us from sending a PSH
+				 * with the FIN. */
+				seg.flags |= FIN;
+				dsize--;
+			}
+			if (BLEN(bp) > payload_mss) {
+				bp->flag |= Btso;
+				bp->mss = payload_mss;
+			}
+		}
+
+		/* PSH when this segment drains everything we want to send. */
+		if (sent + dsize == qlen(s->wq) + tcb->flgcnt)
+			seg.flags |= PSH;
+
+		/* Build header, link data and compute cksum */
+		switch (version) {
+			case V4:
+				tcb->protohdr.tcp4hdr.vihl = IP_VER4;
+				hbp = htontcp4(&seg, bp, &tcb->protohdr.tcp4hdr, tcb);
+				if (hbp == NULL) {
+					freeblist(bp);
+					return;
+				}
+				break;
+			case V6:
+				tcb->protohdr.tcp6hdr.vcf[0] = IP_VER6;
+				hbp = htontcp6(&seg, bp, &tcb->protohdr.tcp6hdr, tcb);
+				if (hbp == NULL) {
+					freeblist(bp);
+					return;
+				}
+				break;
+			default:
+				hbp = NULL;	/* to suppress a warning */
+				panic("tcpoutput: version %d", version);
+		}
+
+		/* Start the transmission timers if there is new data and we
+		 * expect acknowledges
+		 */
+		if (ssize != 0) {
+			if (tcb->timer.state != TcptimerON)
+				tcpgo(tpriv, &tcb->timer);
+
+			if (!tcb->ts_recent && (tcb->rtt_timer.state != TcptimerON)) {
+				/* If round trip timer isn't running, start it. */
+				tcpgo(tpriv, &tcb->rtt_timer);
+				tcb->rttseq = from_seq + ssize;
+			}
+		}
+
+		tpriv->stats[OutSegs]++;
+
+		/* put off the next keep alive */
+		tcpgo(tpriv, &tcb->katimer);
+
+		switch (version) {
+			case V4:
+				if (ipoput4(f, hbp, 0, s->ttl, s->tos, s) < 0) {
+					/* a negative return means no route */
+					localclose(s, "no route");
+				}
+				break;
+			case V6:
+				if (ipoput6(f, hbp, 0, s->ttl, s->tos, s) < 0) {
+					/* a negative return means no route */
+					localclose(s, "no route");
+				}
+				break;
+			default:
+				panic("tcpoutput2: version %d", version);
+		}
+		if (ssize) {
+			/* The outer loop thinks we sent one packet.  If we used TSO, we
+			 * might have sent several.  Minus one for the loop increment. */
+			msgs += DIV_ROUND_UP(ssize, payload_mss) - 1;
+		}
+		/* Old Plan 9 tidbit - yield every four messages.  We want to break out
+		 * and unlock so we can process inbound ACKs which might do things like
+		 * say "slow down". */
+		if (msgs >= next_yield) {
+			next_yield = msgs + 4;
+			qunlock(&s->qlock);
+			kthread_yield();
+			qlock(&s->qlock);
+		}
+	}
+}
+
+/*
+ *  the BSD convention (hack?) for keep alives.  resend last uint8_t acked.
+ */
+static void tcpsendka(struct conv *s)
+{
+	Tcp seg;
+	Tcpctl *tcb;
+	struct block *hbp, *dbp;
+
+	tcb = (Tcpctl *) s->ptcl;
+
+	dbp = NULL;
+	seg.urg = 0;
+	seg.source = s->lport;
+	seg.dest = s->rport;
+	seg.flags = ACK | PSH;
+	seg.mss = 0;
+	seg.ws = 0;
+	seg.sack_ok = FALSE;
+	seg.nr_sacks = 0;
+	/* tcpporthogdefense randomizes the keepalive's seq; otherwise use
+	 * una - 1, the classic "already acked byte" probe. */
+	if (tcpporthogdefense)
+		urandom_read(&seg.seq, sizeof(seg.seq));
+	else
+		seg.seq = tcb->snd.una - 1;
+	seg.ack = tcb->rcv.nxt;
+	tcb->last_ack_sent = seg.ack;
+	tcb->rcv.una = 0;
+	seg.wnd = tcb->rcv.wnd;
+	seg.ts_val = tcb->ts_recent;
+	if (tcb->state == Finwait2) {
+		seg.flags |= FIN;
+	} else {
+		/* One garbage payload byte, to provoke a response from the peer. */
+		dbp = block_alloc(1, MEM_WAIT);
+		dbp->wp++;
+	}
+
+	if (isv4(s->raddr)) {
+		/* Build header, link data and compute cksum */
+		tcb->protohdr.tcp4hdr.vihl = IP_VER4;
+		hbp = htontcp4(&seg, dbp, &tcb->protohdr.tcp4hdr, tcb);
+		if (hbp == NULL) {
+			freeblist(dbp);
+			return;
+		}
+		ipoput4(s->p->f, hbp, 0, s->ttl, s->tos, s);
+	} else {
+		/* Build header, link data and compute cksum */
+		tcb->protohdr.tcp6hdr.vcf[0] = IP_VER6;
+		hbp = htontcp6(&seg, dbp, &tcb->protohdr.tcp6hdr, tcb);
+		if (hbp == NULL) {
+			freeblist(dbp);
+			return;
+		}
+		ipoput6(s->p->f, hbp, 0, s->ttl, s->tos, s);
+	}
+}
+
+/*
+ *  set connection to time out after 12 minutes
+ */
+static void tcpsetkacounter(Tcpctl *tcb)
+{
+	/* Number of keepalive periods that fit in 12 minutes of wall time. */
+	int count = (12 * 60 * 1000) / (tcb->katimer.start * MSPTICK);
+
+	/* Always probe at least three times before giving up. */
+	if (count < 3)
+		count = 3;
+	tcb->kacounter = count;
+}
+
+/*
+ *  if we've timed out, close the connection
+ *  otherwise, send a keepalive and restart the timer
+ */
+static void tcpkeepalive(void *v)
+{
+	ERRSTACK(1);
+	Tcpctl *tcb;
+	struct conv *s;
+
+	s = v;
+	tcb = (Tcpctl *) s->ptcl;
+	qlock(&s->qlock);
+	/* tcpsendka can throw (e.g. from the output path); make sure we drop
+	 * the qlock on the way out. */
+	if (waserror()) {
+		qunlock(&s->qlock);
+		nexterror();
+	}
+	if (tcb->state != Closed) {
+		/* kacounter was set by tcpsetkacounter: probes remaining before we
+		 * declare the peer dead. */
+		if (--(tcb->kacounter) <= 0) {
+			localclose(s, "connection timed out");
+		} else {
+			tcpsendka(s);
+			tcpgo(s->p->priv, &tcb->katimer);
+		}
+	}
+	qunlock(&s->qlock);
+	poperror();
+}
+
+/*
+ *  start keepalive timer.  Called from tcpctl with the conv qlocked; f[1],
+ *  if present, is the keepalive period in milliseconds.
+ */
+static void tcpstartka(struct conv *s, char **f, int n)
+{
+	Tcpctl *tcb;
+	int x;
+
+	tcb = (Tcpctl *) s->ptcl;
+	if (tcb->state != Established)
+		error(ENOTCONN, "connection must be in Established state");
+	/* Optional period argument; ignore anything shorter than one tick. */
+	if (n > 1) {
+		x = atoi(f[1]);
+		if (x >= MSPTICK)
+			tcb->katimer.start = x / MSPTICK;
+	}
+	tcpsetkacounter(tcb);
+	tcpgo(s->p->priv, &tcb->katimer);
+}
+
+/*
+ *  turn checksums on/off
+ */
+static void tcpsetchecksum(struct conv *s, char **f, int unused)
+{
+	Tcpctl *tcb = (Tcpctl *) s->ptcl;
+
+	/* "checksum 0" disables checksumming; any nonzero argument enables it. */
+	tcb->nochecksum = atoi(f[1]) == 0;
+}
+
+/* Reacts to a loss (timeout or fast retransmit) with Reno-style
+ * multiplicative decrease: halve cwnd and pull ssthresh down to match. */
+static void tcp_loss_event(struct conv *s, Tcpctl *tcb)
+{
+	uint32_t prev_cwnd = tcb->cwind;
+
+	tcb->ssthresh = prev_cwnd / 2;
+	tcb->cwind = tcb->ssthresh;
+	netlog(s->p->f, Logtcprxmt,
+	       "%I.%d -> %I.%d: loss event, cwnd was %d, now %d\n",
+	       s->laddr, s->lport, s->raddr, s->rport,
+	       prev_cwnd, tcb->cwind);
+}
+
+/* Called when we need to retrans the entire outstanding window (everything
+ * previously sent, but unacknowledged). */
+static void tcprxmit(struct conv *s)
+{
+	Tcpctl *tcb;
+
+	tcb = (Tcpctl *) s->ptcl;
+
+	tcb->flags |= FORCE;
+	/* Roll the retransmit pointer back to the oldest unacked byte and
+	 * recompute in_flight before pushing segments out. */
+	tcb->snd.rtx = tcb->snd.una;
+	set_in_flight(tcb);
+
+	tcpoutput(s);
+}
+
+/* The original RFC said to drop sacks on a timeout, since the receiver could
+ * renege.  Later RFCs say we can keep them around, so long as we are careful.
+ *
+ * We'll go with a "flush if we have two timeouts" plan.  This doesn't have to
+ * be perfect - there might be cases where we accidentally flush the sacks too
+ * often.  Perhaps we never get dup_acks to start fast/sack rxmit.  The main
+ * thing is that after multiple timeouts we flush the sacks, since the receiver
+ * might renege.
+ *
+ * We also have an Akaros-specific problem.  We use the sacks to determine
+ * in_flight.  Specifically, the (snd.nxt - upper right edge) is tracked as in
+ * flight.  Usually the receiver will keep sacking that right edge all the way
+ * up to snd.nxt, but they might not, and the gap might be quite large.  After a
+ * timeout, that data is definitely not in flight.  If that block's size is
+ * greater than cwnd, we'll never transmit.  This should be rare, and in that
+ * case we can just dump the sacks.  The typical_mss fudge factor is so we can
+ * send a reasonably-sized packet. */
+static void timeout_handle_sacks(Tcpctl *tcb)
+{
+	struct sack_block *highest_sack;
+
+	if (!tcb->snd.nr_sacks)
+		return;
+	highest_sack = &tcb->snd.sacks[tcb->snd.nr_sacks - 1];
+	/* Flush on the second consecutive timeout, or when the unsacked region
+	 * above the highest sack can't fit in cwnd (see comment above). */
+	if (tcb->snd.flush_sacks || (tcb->snd.nxt - highest_sack->right >=
+	                             tcb->cwind - tcb->typical_mss)) {
+		tcb->snd.nr_sacks = 0;
+		tcb->snd.flush_sacks = FALSE;
+	} else {
+		tcb->snd.flush_sacks = TRUE;
+	}
+}
+
+/* Retransmission-timer (RTO) callback: back off, declare a loss event, and
+ * retransmit from snd.una; gives up and closes after MAXBACKMS of backoff. */
+static void tcptimeout(void *arg)
+{
+	ERRSTACK(1);
+	struct conv *s;
+	Tcpctl *tcb;
+	int maxback;
+	struct tcppriv *tpriv;
+
+	s = (struct conv *)arg;
+	tpriv = s->p->priv;
+	tcb = (Tcpctl *) s->ptcl;
+
+	qlock(&s->qlock);
+	if (waserror()) {
+		qunlock(&s->qlock);
+		nexterror();
+	}
+	switch (tcb->state) {
+		default:
+			/* Exponential backoff; close once the total time backed off
+			 * crosses maxback (shorter fuse while still in Syn_sent). */
+			tcb->backoff++;
+			if (tcb->state == Syn_sent)
+				maxback = MAXBACKMS / 2;
+			else
+				maxback = MAXBACKMS;
+			tcb->backedoff += tcb->timer.start * MSPTICK;
+			if (tcb->backedoff >= maxback) {
+				localclose(s, "connection timed out");
+				break;
+			}
+			netlog(s->p->f, Logtcprxmt,
+			       "%I.%d -> %I.%d: timeout rxmit una %u, rtx %u, nxt %u, in_flight %u, timer.start %u\n",
+			       s->laddr, s->lport, s->raddr, s->rport,
+			       tcb->snd.una, tcb->snd.rtx, tcb->snd.nxt, tcb->snd.in_flight,
+			       tcb->timer.start);
+			tcpsettimer(tcb);
+			tcp_loss_event(s, tcb);
+			/* Advance the recovery point.  Any dupacks/sacks below this won't
+			 * trigger a new loss, since we won't reset_recovery() until we ack
+			 * past recovery_pt. */
+			tcb->snd.recovery = RTO_RETRANS_RECOVERY;
+			tcb->snd.recovery_pt = tcb->snd.nxt;
+			timeout_handle_sacks(tcb);
+			tcprxmit(s);
+			tpriv->stats[RetransTimeouts]++;
+			break;
+		case Time_wait:
+			localclose(s, NULL);
+			break;
+		case Closed:
+			break;
+	}
+	qunlock(&s->qlock);
+	poperror();
+}
+
+/* Returns nonzero if seq falls within the receive window
+ * [rcv.nxt, rcv.nxt + rcv.wnd - 1], using mod-2^32 sequence compares. */
+static int inwindow(Tcpctl *tcb, int seq)
+{
+	return seq_within(seq, tcb->rcv.nxt, tcb->rcv.nxt + tcb->rcv.wnd - 1);
+}
+
+/*
+ *  set up state for a received SYN (or SYN ACK) packet
+ */
+static void procsyn(struct conv *s, Tcp *seg)
+{
+	Tcpctl *tcb;
+
+	tcb = (Tcpctl *) s->ptcl;
+	tcb->flags |= FORCE;
+
+	/* The SYN consumes one sequence number. */
+	tcb->rcv.nxt = seg->seq + 1;
+	tcb->rcv.urg = tcb->rcv.nxt;
+	tcb->irs = seg->seq;
+
+	/* our sending max segment size cannot be bigger than what he asked for */
+	if (seg->mss != 0 && seg->mss < tcb->mss) {
+		tcb->mss = seg->mss;
+		tcb->typical_mss = tcb->mss;
+	}
+	adjust_typical_mss_for_opts(seg, tcb);
+
+	/* Initial cwnd, scaled from the (possibly reduced) typical mss. */
+	tcb->snd.wnd = seg->wnd;
+	tcb->cwind = tcb->typical_mss * CWIND_SCALE;
+}
+
+/* Inserts an out-of-order segment (seg/bp/length) into tcb's reassembly list,
+ * sorted by starting seq, and records it in our rcv sacks.  bp is always
+ * consumed.  Returns 0 on success, -1 if the queue outgrew the window and was
+ * flushed (reneging on previously reported sacks). */
+static int addreseq(Tcpctl *tcb, struct tcppriv *tpriv, Tcp *seg,
+                    struct block *bp, uint16_t length)
+{
+	Reseq *rp, *rp1;
+	int i, rqlen, qmax;
+
+	rp = kzmalloc(sizeof(Reseq), 0);
+	if (rp == NULL) {
+		freeblist(bp);	/* bp always consumed by add_reseq */
+		return 0;
+	}
+
+	rp->seg = *seg;
+	rp->bp = bp;
+	rp->length = length;
+
+	track_rcv_sack(tcb, seg->seq, seg->seq + length);
+	/* Place on reassembly list sorting by starting seq number */
+	rp1 = tcb->reseq;
+	if (rp1 == NULL || seq_lt(seg->seq, rp1->seg.seq)) {
+		rp->next = rp1;
+		tcb->reseq = rp;
+		if (rp->next != NULL)
+			tpriv->stats[OutOfOrder]++;
+		return 0;
+	}
+
+	rqlen = 0;
+	for (i = 0;; i++) {
+		rqlen += rp1->length;
+		if (rp1->next == NULL || seq_lt(seg->seq, rp1->next->seg.seq)) {
+			rp->next = rp1->next;
+			rp1->next = rp;
+			if (rp->next != NULL)
+				tpriv->stats[OutOfOrder]++;
+			break;
+		}
+		rp1 = rp1->next;
+	}
+	qmax = QMAX << tcb->rcv.scale;
+	/* Here's where we're reneging on previously reported sacks. */
+	if (rqlen > qmax) {
+		printd("resequence queue > window: %d > %d\n", rqlen, qmax);
+		i = 0;
+		for (rp1 = tcb->reseq; rp1 != NULL; rp1 = rp1->next) {
+			/* seq/ack are uint32_t: %#x already prints the 0x prefix, and
+			 * the old "0x%#lx" both doubled the prefix and mismatched the
+			 * promoted argument width on 64-bit. */
+			printd("%#x %#x %#x\n", rp1->seg.seq,
+				   rp1->seg.ack, rp1->seg.flags);
+			if (i++ > 10) {
+				printd("...\n");
+				break;
+			}
+		}
+
+		// delete entire reassembly queue; wait for retransmit.
+		// - should we be smarter and only delete the tail?
+		for (rp = tcb->reseq; rp != NULL; rp = rp1) {
+			rp1 = rp->next;
+			freeblist(rp->bp);
+			kfree(rp);
+		}
+		tcb->reseq = NULL;
+		tcb->rcv.nr_sacks = 0;
+
+		return -1;
+	}
+	return 0;
+}
+
+/* Pops the lowest-seq entry off the reassembly list into seg/bp/length.
+ * No-op (out params untouched) when the list is empty. */
+static void getreseq(Tcpctl *tcb, Tcp *seg, struct block **bp, uint16_t *length)
+{
+	Reseq *rp;
+
+	rp = tcb->reseq;
+	if (rp == NULL)
+		return;
+
+	tcb->reseq = rp->next;
+
+	*seg = rp->seg;
+	*bp = rp->bp;
+	*length = rp->length;
+
+	kfree(rp);
+}
+
+/* Trims a received segment to fit the receive window: rejects fully
+ * out-of-window segments (frees *bp, returns -1), strips already-received
+ * data from the front (including a duplicate SYN), and clips data (and a FIN)
+ * hanging past the right edge.  Returns 0 if the segment is acceptable. */
+static int tcptrim(Tcpctl *tcb, Tcp *seg, struct block **bp, uint16_t *length)
+{
+	uint16_t len;
+	uint8_t accept;
+	int dupcnt, excess;
+
+	accept = 0;
+	len = *length;
+	/* SYN and FIN each occupy a sequence number. */
+	if (seg->flags & SYN)
+		len++;
+	if (seg->flags & FIN)
+		len++;
+
+	if (tcb->rcv.wnd == 0) {
+		/* Zero window: only a bare, in-order segment is acceptable. */
+		if (len == 0 && seg->seq == tcb->rcv.nxt)
+			return 0;
+	} else {
+		/* Some part of the segment should be in the window */
+		if (inwindow(tcb, seg->seq))
+			accept++;
+		else if (len != 0) {
+			if (inwindow(tcb, seg->seq + len - 1) ||
+				seq_within(tcb->rcv.nxt, seg->seq, seg->seq + len - 1))
+				accept++;
+		}
+	}
+	if (!accept) {
+		freeblist(*bp);
+		return -1;
+	}
+	/* dupcnt: bytes at the front that we have already received. */
+	dupcnt = tcb->rcv.nxt - seg->seq;
+	if (dupcnt > 0) {
+		tcb->rerecv += dupcnt;
+		if (seg->flags & SYN) {
+			seg->flags &= ~SYN;
+			seg->seq++;
+
+			if (seg->urg > 1)
+				seg->urg--;
+			else
+				seg->flags &= ~URG;
+			dupcnt--;
+		}
+		if (dupcnt > 0) {
+			pullblock(bp, (uint16_t) dupcnt);
+			seg->seq += dupcnt;
+			*length -= dupcnt;
+
+			if (seg->urg > dupcnt)
+				seg->urg -= dupcnt;
+			else {
+				seg->flags &= ~URG;
+				seg->urg = 0;
+			}
+		}
+	}
+	/* excess: bytes hanging off the right edge of our window. */
+	excess = seg->seq + *length - (tcb->rcv.nxt + tcb->rcv.wnd);
+	if (excess > 0) {
+		tcb->rerecv += excess;
+		*length -= excess;
+		*bp = trimblock(*bp, 0, *length);
+		if (*bp == NULL)
+			panic("presotto is a boofhead");
+		seg->flags &= ~FIN;
+	}
+	return 0;
+}
+
+/* ICMP-style advice: finds the conversation matching the headers in bp and,
+ * if it is still connecting (Syn_sent), closes it with msg.  Consumes bp. */
+static void tcpadvise(struct Proto *tcp, struct block *bp, char *msg)
+{
+	Tcp4hdr *h4;
+	Tcp6hdr *h6;
+	Tcpctl *tcb;
+	uint8_t source[IPaddrlen];
+	uint8_t dest[IPaddrlen];
+	uint16_t psource, pdest;
+	struct conv *s, **p;
+
+	h4 = (Tcp4hdr *) (bp->rp);
+	h6 = (Tcp6hdr *) (bp->rp);
+
+	/* Extract the 4-tuple, normalizing v4 addresses to v6 form. */
+	if ((h4->vihl & 0xF0) == IP_VER4) {
+		v4tov6(dest, h4->tcpdst);
+		v4tov6(source, h4->tcpsrc);
+		psource = nhgets(h4->tcpsport);
+		pdest = nhgets(h4->tcpdport);
+	} else {
+		ipmove(dest, h6->tcpdst);
+		ipmove(source, h6->tcpsrc);
+		psource = nhgets(h6->tcpsport);
+		pdest = nhgets(h6->tcpdport);
+	}
+
+	/* Look for a connection */
+	for (p = tcp->conv; *p; p++) {
+		s = *p;
+		tcb = (Tcpctl *) s->ptcl;
+		if (s->rport != pdest || s->lport != psource)
+			continue;
+		if (tcb->state == Closed)
+			continue;
+		if (ipcmp(s->raddr, dest) != 0 || ipcmp(s->laddr, source) != 0)
+			continue;
+		qlock(&s->qlock);
+		if (tcb->state == Syn_sent)
+			localclose(s, msg);
+		qunlock(&s->qlock);
+		freeblist(bp);
+		return;
+	}
+	freeblist(bp);
+}
+
+/* Parses a ctl value for the port-hog defense: exactly "on" or "off". */
+static void tcpporthogdefensectl(char *val)
+{
+	if (!strcmp(val, "on"))
+		tcpporthogdefense = 1;
+	else if (!strcmp(val, "off"))
+		tcpporthogdefense = 0;
+	else
+		error(EINVAL, "unknown value for tcpporthogdefense");
+}
+
+/* Dispatches a write to the conversation's ctl file.  Called with c qlocked.
+ * f/n are the tokenized command words.  "checksum" and "tcpporthogdefense"
+ * read f[1], so they require n >= 2 (the old n >= 1 check let a bare command
+ * dereference a missing argument). */
+static void tcpctl(struct conv *c, char **f, int n)
+{
+	if (n == 1 && strcmp(f[0], "hangup") == 0)
+		tcphangup(c);
+	else if (n >= 1 && strcmp(f[0], "keepalive") == 0)
+		tcpstartka(c, f, n);
+	else if (n >= 2 && strcmp(f[0], "checksum") == 0)
+		tcpsetchecksum(c, f, n);
+	else if (n >= 2 && strcmp(f[0], "tcpporthogdefense") == 0)
+		tcpporthogdefensectl(f[1]);
+	else
+		error(EINVAL, "unknown command to %s", __func__);
+}
+
+/* Formats one "name: count" line per protocol stat into buf (at most len
+ * bytes); returns the number of bytes written. */
+static int tcpstats(struct Proto *tcp, char *buf, int len)
+{
+	struct tcppriv *priv = tcp->priv;
+	char *cur = buf;
+	char *end = buf + len;
+
+	for (int i = 0; i < Nstats; i++)
+		cur = seprintf(cur, end, "%s: %u\n", statnames[i], priv->stats[i]);
+	return cur - buf;
+}
+
+/*
+ *  garbage collect any stale conversations:
+ *     - SYN received but no SYN-ACK after 5 seconds (could be the SYN attack)
+ *     - Finwait2 after 5 minutes
+ *
+ *  this is called whenever we run out of channels.  Both checks are
+ *  of questionable validity so we try to use them only when we're
+ *  up against the wall.
+ */
+static int tcpgc(struct Proto *tcp)
+{
+	struct conv *c, **pp, **ep;
+	int n;
+	Tcpctl *tcb;
+
+	n = 0;
+	ep = &tcp->conv[tcp->nc];
+	for (pp = tcp->conv; pp < ep; pp++) {
+		c = *pp;
+		if (c == NULL)
+			break;
+		/* Best effort: skip any conversation we'd have to wait on. */
+		if (!canqlock(&c->qlock))
+			continue;
+		tcb = (Tcpctl *) c->ptcl;
+		/* Only the Finwait2 check is implemented here; see header comment. */
+		if (tcb->state == Finwait2) {
+			if (NOW - tcb->time > 5 * 60 * 1000) {
+				localclose(c, "timed out");
+				n++;
+			}
+		}
+		qunlock(&c->qlock);
+	}
+	return n;
+}
+
+/* Set the retransmit timer from the smoothed RTT estimate, scaled by
+ * the current backoff, and clamp the result to [500ms, 64s] expressed
+ * in MSPTICK units.  RFC 6298 suggests a 1 second minimum. */
+static void tcpsettimer(Tcpctl *tcb)
+{
+	int ticks;
+
+	/* round trip dependency */
+	ticks = backoff(tcb->backoff) *
+		(tcb->srtt + MAX(4 * tcb->mdev, MSPTICK));
+	ticks = DIV_ROUND_UP(ticks, MSPTICK);
+
+	if (ticks < 500 / MSPTICK)
+		ticks = 500 / MSPTICK;
+	if (ticks > 64000 / MSPTICK)
+		ticks = 64000 / MSPTICK;
+	tcb->timer.start = ticks;
+}
+
+/* Stashed pointer to TCP's private state, for debugging only; set once
+ * in tcpinit(). */
+static struct tcppriv *debug_priv;
+
+/* Kfunc this */
+/* Dump the TCP connection hash table.  Returns -1 if tcpinit() has not
+ * run yet (nothing stashed in debug_priv), 0 on success. */
+int dump_tcp_ht(void)
+{
+	if (!debug_priv)
+		return -1;
+	dump_ipht(&debug_priv->ht);
+	return 0;
+}
+
+/* Register TCP with the network stack instance fs: allocate the Proto
+ * and its private state, fill in the protocol operation vector, and
+ * hand the result to Fsproto(). */
+void tcpinit(struct Fs *fs)
+{
+	struct Proto *tcp;
+	struct tcppriv *tpriv;
+
+	/* NOTE(review): kzmalloc results are unchecked; presumably this runs
+	 * at boot where allocation failure is fatal anyway -- confirm. */
+	tcp = kzmalloc(sizeof(struct Proto), 0);
+	tpriv = tcp->priv = kzmalloc(sizeof(struct tcppriv), 0);
+	debug_priv = tpriv;	/* stash for dump_tcp_ht() */
+	qlock_init(&tpriv->tl);
+	qlock_init(&tpriv->apl);
+	tcp->name = "tcp";
+	tcp->connect = tcpconnect;
+	tcp->announce = tcpannounce;
+	tcp->bypass = tcpbypass;
+	tcp->ctl = tcpctl;
+	tcp->state = tcpstate;
+	tcp->create = tcpcreate;
+	tcp->close = tcpclose;
+	tcp->shutdown = tcpshutdown;
+	tcp->rcv = tcpiput;
+	tcp->advise = tcpadvise;
+	tcp->stats = tcpstats;
+	tcp->inuse = tcpinuse;
+	tcp->gc = tcpgc;
+	tcp->ipproto = IP_TCPPROTO;
+	tcp->nc = 4096;		/* max simultaneous conversations */
+	tcp->ptclsize = sizeof(Tcpctl);
+	tpriv->stats[MaxConn] = tcp->nc;
+
+	Fsproto(fs, tcp);
+}
+
+/* Install the negotiated window-scale factors on tcb and size the
+ * receive window accordingly.  A zero rcvscale means scaling was not
+ * negotiated: both scales are cleared and the window stays at QMAX. */
+static void tcpsetscale(struct conv *s, Tcpctl *tcb, uint16_t rcvscale,
+                        uint16_t sndscale)
+{
+	if (!rcvscale) {
+		tcb->rcv.scale = 0;
+		tcb->snd.scale = 0;
+		tcb->window = QMAX;
+		return;
+	}
+	tcb->rcv.scale = rcvscale & 0xff;
+	tcb->snd.scale = sndscale & 0xff;
+	tcb->window = QMAX << tcb->rcv.scale;
+}