Date:      Tue, 27 Aug 2019 04:19:40 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r351540 - in head/sys/dev/cxgbe: . cxgbei iw_cxgbe tom
Message-ID:  <201908270419.x7R4JeLg007426@repo.freebsd.org>

Author: np
Date: Tue Aug 27 04:19:40 2019
New Revision: 351540
URL: https://svnweb.freebsd.org/changeset/base/351540

Log:
  cxgbe/t4_tom: Initialize all TOE connection parameters in one place.
  Remove now-redundant items from toepcb and synq_entry and the code to
  support them.
  
  Let the driver calculate tx_align, rx_coalesce, and sndbuf by default.
  
  Reviewed by:	jhb@
  MFC after:	1 week
  Sponsored by:	Chelsio Communications
  Differential Revision:	https://reviews.freebsd.org/D21387
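
A note for readers following the diff below: the commit replaces per-field connection setup scattered across t4_connect.c (active open) and t4_listen.c (passive open) with a single parameters structure that both struct toepcb and struct synq_entry carry, filled in by init_conn_params() and consumed by calc_options0()/calc_options2() and send_flowc_wr().  The sketch below is illustrative only, reconstructed from the fields this diff references (params.ulp_mode, tc_idx, mtu_idx, emss, tstamp, sack, wscale, sndbuf, opt0_bufsize, tx_align, nagle, l2t_idx, txq_idx, rxq_idx); the authoritative definition lives in t4_tom.h, which falls outside the truncated portion of this mail, so field widths, ordering, and names of anything not shown in the hunks are guesses.

  /*
   * Illustrative approximation only -- not copied from t4_tom.h.
   * Connection parameters that used to live directly in struct toepcb
   * and struct synq_entry, now gathered in one place and initialized
   * once by init_conn_params().
   */
  struct conn_params {
  	int8_t	rx_coalesce;	/* -1 = use driver default (sc->tt.rx_coalesce) */
  	int8_t	tc_idx;		/* tx scheduling class, -1 = none */
  	int8_t	tstamp;		/* TCP timestamps negotiated */
  	int8_t	sack;		/* SACK negotiated */
  	int8_t	wscale;		/* window scaling negotiated */
  	int8_t	nagle;		/* Nagle enabled on this connection */
  	int8_t	mtu_idx;	/* index into sc->params.mtus[] */
  	int8_t	ulp_mode;	/* ULP_MODE_NONE/TCPDDP/TLS/ISCSI/RDMA */
  	int8_t	tx_align;	/* chop and align payload, -1 = driver default */
  	int16_t	txq_idx;	/* offload tx queue */
  	int16_t	rxq_idx;	/* offload rx queue */
  	int16_t	l2t_idx;	/* L2 table entry */
  	uint16_t emss;		/* effective MSS */
  	uint16_t opt0_bufsize;	/* opt0 RCV_BUFSIZ, in 1KB units */
  	u_int	sndbuf;		/* hardware send buffer size */
  };

  /* Both toepcb and synq_entry embed the struct, so one accessor serves both. */
  static inline int
  ulp_mode(struct toepcb *toep)
  {
  	return (toep->params.ulp_mode);
  }

Most hunks below are mechanical conversions to this scheme: toep->ulp_mode becomes ulp_mode(toep), toep->tc_idx becomes toep->params.tc_idx, and the per-path option calculators calc_opt2a()/calc_opt2p() are deleted in favor of the shared calc_options0()/calc_options2().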

Modified:
  head/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
  head/sys/dev/cxgbe/iw_cxgbe/qp.c
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/tom/t4_connect.c
  head/sys/dev/cxgbe/tom/t4_cpl_io.c
  head/sys/dev/cxgbe/tom/t4_ddp.c
  head/sys/dev/cxgbe/tom/t4_listen.c
  head/sys/dev/cxgbe/tom/t4_tls.c
  head/sys/dev/cxgbe/tom/t4_tom.c
  head/sys/dev/cxgbe/tom/t4_tom.h

Modified: head/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
==============================================================================
--- head/sys/dev/cxgbe/cxgbei/icl_cxgbei.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/cxgbei/icl_cxgbei.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -697,7 +697,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
 			    ISCSI_DATA_DIGEST_SIZE;
 		}
 		so->so_options |= SO_NO_DDP;
-		toep->ulp_mode = ULP_MODE_ISCSI;
+		toep->params.ulp_mode = ULP_MODE_ISCSI;
 		toep->ulpcb = icc;
 
 		send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);

Modified: head/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/qp.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/iw_cxgbe/qp.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -1415,7 +1415,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw
 	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
 			qhp->wq.sq.qid, ep->com.so, __func__);
 
-	toep->ulp_mode = ULP_MODE_RDMA;
+	toep->params.ulp_mode = ULP_MODE_RDMA;
 	free_ird(rhp, qhp->attr.max_ird);
 
 	return ret;

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/t4_main.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -6229,9 +6229,9 @@ t4_sysctls(struct adapter *sc)
 		    "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
 		    "3 = highspeed)");
 
-		sc->tt.sndbuf = 256 * 1024;
+		sc->tt.sndbuf = -1;
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
-		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
+		    &sc->tt.sndbuf, 0, "hardware send buffer");
 
 		sc->tt.ddp = 0;
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
@@ -6239,7 +6239,7 @@ t4_sysctls(struct adapter *sc)
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
 		    &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
 
-		sc->tt.rx_coalesce = 1;
+		sc->tt.rx_coalesce = -1;
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
 		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
 
@@ -6251,7 +6251,7 @@ t4_sysctls(struct adapter *sc)
 		    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
 		    "I", "TCP ports that use inline TLS+TOE RX");
 
-		sc->tt.tx_align = 1;
+		sc->tt.tx_align = -1;
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
 		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
 

Modified: head/sys/dev/cxgbe/tom/t4_connect.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_connect.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/tom/t4_connect.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -102,7 +102,7 @@ do_act_establish(struct sge_iq *iq, const struct rss_h
 	make_established(toep, be32toh(cpl->snd_isn) - 1,
 	    be32toh(cpl->rcv_isn) - 1, cpl->tcp_opt);
 
-	if (toep->ulp_mode == ULP_MODE_TLS)
+	if (ulp_mode(toep) == ULP_MODE_TLS)
 		tls_establish(toep);
 
 done:
@@ -165,96 +165,6 @@ do_act_open_rpl(struct sge_iq *iq, const struct rss_he
 	return (0);
 }
 
-/*
- * Options2 for active open.
- */
-static uint32_t
-calc_opt2a(struct socket *so, struct toepcb *toep,
-    const struct offload_settings *s)
-{
-	struct tcpcb *tp = so_sototcpcb(so);
-	struct port_info *pi = toep->vi->pi;
-	struct adapter *sc = pi->adapter;
-	uint32_t opt2 = 0;
-
-	/*
-	 * rx flow control, rx coalesce, congestion control, and tx pace are all
-	 * explicitly set by the driver.  On T5+ the ISS is also set by the
-	 * driver to the value picked by the kernel.
-	 */
-	if (is_t4(sc)) {
-		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
-		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
-	} else {
-		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
-		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
-	}
-
-	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
-		opt2 |= F_SACK_EN;
-
-	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
-		opt2 |= F_TSTAMPS_EN;
-
-	if (tp->t_flags & TF_REQ_SCALE)
-		opt2 |= F_WND_SCALE_EN;
-
-	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
-		opt2 |= F_CCTRL_ECN;
-
-	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */
-
-	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
-
-	/* These defaults are subject to ULP specific fixups later. */
-	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
-
-	opt2 |= V_PACE(0);
-
-	if (s->cong_algo >= 0)
-		opt2 |= V_CONG_CNTRL(s->cong_algo);
-	else if (sc->tt.cong_algorithm >= 0)
-		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
-	else {
-		struct cc_algo *cc = CC_ALGO(tp);
-
-		if (strcasecmp(cc->name, "reno") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
-		else if (strcasecmp(cc->name, "tahoe") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
-		if (strcasecmp(cc->name, "newreno") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
-		if (strcasecmp(cc->name, "highspeed") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
-		else {
-			/*
-			 * Use newreno in case the algorithm selected by the
-			 * host stack is not supported by the hardware.
-			 */
-			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
-		}
-	}
-
-	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
-		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
-
-	/* Note that ofld_rxq is already set according to s->rxq. */
-	opt2 |= F_RSS_QUEUE_VALID;
-	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);
-
-#ifdef USE_DDP_RX_FLOW_CONTROL
-	if (toep->ulp_mode == ULP_MODE_TCPDDP)
-		opt2 |= F_RX_FC_DDP;
-#endif
-
-	if (toep->ulp_mode == ULP_MODE_TLS) {
-		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
-		opt2 |= F_RX_FC_DISABLE;
-	}
-
-	return (htobe32(opt2));
-}
-
 void
 t4_init_connect_cpl_handlers(void)
 {
@@ -322,7 +232,7 @@ t4_connect(struct toedev *tod, struct socket *so, stru
 	struct wrqe *wr = NULL;
 	struct ifnet *rt_ifp = rt->rt_ifp;
 	struct vi_info *vi;
-	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
+	int qid_atid, rc, isipv6;
 	struct inpcb *inp = sotoinpcb(so);
 	struct tcpcb *tp = intotcpcb(inp);
 	int reason;
@@ -353,18 +263,7 @@ t4_connect(struct toedev *tod, struct socket *so, stru
 	if (!settings.offload)
 		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);
 
-	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
-		txqid = settings.txq;
-	else
-		txqid = arc4random() % vi->nofldtxq;
-	txqid += vi->first_ofld_txq;
-	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
-		rxqid = settings.rxq;
-	else
-		rxqid = arc4random() % vi->nofldrxq;
-	rxqid += vi->first_ofld_rxq;
-
-	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
+	toep = alloc_toepcb(vi, M_NOWAIT);
 	if (toep == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
@@ -377,27 +276,16 @@ t4_connect(struct toedev *tod, struct socket *so, stru
 	if (toep->l2te == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
+	toep->vnet = so->so_vnet;
+	init_conn_params(vi, &settings, &inp->inp_inc, so, NULL,
+	    toep->l2te->idx, &toep->params);
+	init_toepcb(vi, toep);
+
 	isipv6 = nam->sa_family == AF_INET6;
 	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
 	if (wr == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
-	toep->vnet = so->so_vnet;
-	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
-	SOCKBUF_LOCK(&so->so_rcv);
-	toep->opt0_rcv_bufsize = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
-	SOCKBUF_UNLOCK(&so->so_rcv);
-
-	/*
-	 * The kernel sets request_r_scale based on sb_max whereas we need to
-	 * take hardware's MAX_RCV_WND into account too.  This is normally a
-	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
-	 */
-	if (tp->t_flags & TF_REQ_SCALE)
-		rscale = tp->request_r_scale = select_rcv_wscale();
-	else
-		rscale = 0;
-	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
 	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
 	    V_TID_COOKIE(CPL_COOKIE_TOM);
 
@@ -438,9 +326,13 @@ t4_connect(struct toedev *tod, struct socket *so, stru
 		cpl->peer_port = inp->inp_fport;
 		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
 		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
-		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
-		    toep->opt0_rcv_bufsize, toep->ulp_mode, &settings);
-		cpl->opt2 = calc_opt2a(so, toep, &settings);
+		cpl->opt0 = calc_options0(vi, &toep->params);
+		cpl->opt2 = calc_options2(vi, &toep->params);
+
+		CTR6(KTR_CXGBE,
+		    "%s: atid %u, toep %p, inp %p, opt0 %#016lx, opt2 %#08x",
+		    __func__, toep->tid, toep, inp, be64toh(cpl->opt0),
+		    be32toh(cpl->opt2));
 	} else {
 		struct cpl_act_open_req *cpl = wrtod(wr);
 		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
@@ -467,13 +359,14 @@ t4_connect(struct toedev *tod, struct socket *so, stru
 		    qid_atid));
 		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
 		    &cpl->peer_ip, &cpl->peer_port);
-		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
-		    toep->opt0_rcv_bufsize, toep->ulp_mode, &settings);
-		cpl->opt2 = calc_opt2a(so, toep, &settings);
-	}
+		cpl->opt0 = calc_options0(vi, &toep->params);
+		cpl->opt2 = calc_options2(vi, &toep->params);
 
-	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
-	    toep->tid, tcpstates[tp->t_state], toep, inp);
+		CTR6(KTR_CXGBE,
+		    "%s: atid %u, toep %p, inp %p, opt0 %#016lx, opt2 %#08x",
+		    __func__, toep->tid, toep, inp, be64toh(cpl->opt0),
+		    be32toh(cpl->opt2));
+	}
 
 	offload_socket(so, toep);
 	rc = t4_l2t_send(sc, wr, toep->l2te);

Modified: head/sys/dev/cxgbe/tom/t4_cpl_io.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_cpl_io.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/tom/t4_cpl_io.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -77,7 +77,7 @@ static void	t4_aiotx_cancel(struct kaiocb *job);
 static void	t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep);
 
 void
-send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
+send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
 {
 	struct wrqe *wr;
 	struct fw_flowc_wr *flowc;
@@ -91,17 +91,17 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_par
 	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
 	    ("%s: flowc for tid %u sent already", __func__, toep->tid));
 
-	if (ftxp != NULL)
+	if (tp != NULL)
 		nparams = 8;
 	else
 		nparams = 6;
-	if (toep->ulp_mode == ULP_MODE_TLS)
+	if (ulp_mode(toep) == ULP_MODE_TLS)
 		nparams++;
 	if (toep->tls.fcplenmax != 0)
 		nparams++;
-	if (toep->tc_idx != -1) {
-		MPASS(toep->tc_idx >= 0 &&
-		    toep->tc_idx < sc->chip_params->nsched_cls);
+	if (toep->params.tc_idx != -1) {
+		MPASS(toep->params.tc_idx >= 0 &&
+		    toep->params.tc_idx < sc->chip_params->nsched_cls);
 		nparams++;
 	}
 
@@ -133,30 +133,23 @@ send_flowc_wr(struct toepcb *toep, struct flowc_tx_par
 	FLOWC_PARAM(CH, pi->tx_chan);
 	FLOWC_PARAM(PORT, pi->tx_chan);
 	FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
-	if (ftxp) {
-		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);
-
-		FLOWC_PARAM(SNDNXT, ftxp->snd_nxt);
-		FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt);
-		FLOWC_PARAM(SNDBUF, sndbuf);
-		FLOWC_PARAM(MSS, ftxp->mss);
-
-		CTR6(KTR_CXGBE,
-		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
-		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
-		    ftxp->rcv_nxt);
-	} else {
-		FLOWC_PARAM(SNDBUF, 512);
-		FLOWC_PARAM(MSS, 512);
-
-		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
+	FLOWC_PARAM(SNDBUF, toep->params.sndbuf);
+	FLOWC_PARAM(MSS, toep->params.emss);
+	if (tp) {
+		FLOWC_PARAM(SNDNXT, tp->snd_nxt);
+		FLOWC_PARAM(RCVNXT, tp->rcv_nxt);
 	}
-	if (toep->ulp_mode == ULP_MODE_TLS)
-		FLOWC_PARAM(ULP_MODE, toep->ulp_mode);
+	CTR6(KTR_CXGBE,
+	    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
+	    __func__, toep->tid, toep->params.emss, toep->params.sndbuf,
+	    tp ? tp->snd_nxt : 0, tp ? tp->rcv_nxt : 0);
+
+	if (ulp_mode(toep) == ULP_MODE_TLS)
+		FLOWC_PARAM(ULP_MODE, ulp_mode(toep));
 	if (toep->tls.fcplenmax != 0)
 		FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax);
-	if (toep->tc_idx != -1)
-		FLOWC_PARAM(SCHEDCLASS, toep->tc_idx);
+	if (toep->params.tc_idx != -1)
+		FLOWC_PARAM(SCHEDCLASS, toep->params.tc_idx);
 #undef FLOWC_PARAM
 
 	KASSERT(paramidx == nparams, ("nparams mismatch"));
@@ -197,7 +190,7 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb
 		MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
 	}
 
-	if (toep->tc_idx != tc_idx) {
+	if (toep->params.tc_idx != tc_idx) {
 		struct wrqe *wr;
 		struct fw_flowc_wr *flowc;
 		int nparams = 1, flowclen, flowclen16;
@@ -236,9 +229,9 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb
 		t4_wrq_tx(sc, wr);
 	}
 
-	if (toep->tc_idx >= 0)
-		t4_release_cl_rl(sc, port_id, toep->tc_idx);
-	toep->tc_idx = tc_idx;
+	if (toep->params.tc_idx >= 0)
+		t4_release_cl_rl(sc, port_id, toep->params.tc_idx);
+	toep->params.tc_idx = tc_idx;
 
 	return (0);
 }
@@ -313,30 +306,30 @@ assign_rxopt(struct tcpcb *tp, uint16_t opt)
 
 	INP_LOCK_ASSERT(inp);
 
-	toep->tcp_opt = opt;
-	toep->mtu_idx = G_TCPOPT_MSS(opt);
-	tp->t_maxseg = sc->params.mtus[toep->mtu_idx];
+	toep->params.mtu_idx = G_TCPOPT_MSS(opt);
+	tp->t_maxseg = sc->params.mtus[toep->params.mtu_idx];
 	if (inp->inp_inc.inc_flags & INC_ISIPV6)
 		tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
 	else
 		tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
 
-	toep->emss = tp->t_maxseg;
+	toep->params.emss = tp->t_maxseg;
 	if (G_TCPOPT_TSTAMP(opt)) {
+		toep->params.tstamp = 1;
+		toep->params.emss -= TCPOLEN_TSTAMP_APPA;
 		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
 		tp->ts_recent = 0;		/* hmmm */
 		tp->ts_recent_age = tcp_ts_getticks();
-		toep->emss -= TCPOLEN_TSTAMP_APPA;
-	}
+	} else
+		toep->params.tstamp = 0;
 
-	CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
-	    __func__, toep->tid, toep->mtu_idx,
-	    sc->params.mtus[G_TCPOPT_MSS(opt)], tp->t_maxseg, toep->emss);
-
-	if (G_TCPOPT_SACK(opt))
+	if (G_TCPOPT_SACK(opt)) {
+		toep->params.sack = 1;
 		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
-	else
+	} else {
+		toep->params.sack = 0;
 		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */
+	}
 
 	if (G_TCPOPT_WSCALE_OK(opt))
 		tp->t_flags |= TF_RCVD_SCALE;
@@ -346,7 +339,13 @@ assign_rxopt(struct tcpcb *tp, uint16_t opt)
 	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
 		tp->rcv_scale = tp->request_r_scale;
 		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
-	}
+	} else
+		toep->params.wscale = 0;
+
+	CTR6(KTR_CXGBE,
+	    "assign_rxopt: tid %d, mtu_idx %u, emss %u, ts %u, sack %u, wscale %u",
+	    toep->tid, toep->params.mtu_idx, toep->params.emss,
+	    toep->params.tstamp, toep->params.sack, toep->params.wscale);
 }
 
 /*
@@ -361,9 +360,7 @@ make_established(struct toepcb *toep, uint32_t iss, ui
 	struct inpcb *inp = toep->inp;
 	struct socket *so = inp->inp_socket;
 	struct tcpcb *tp = intotcpcb(inp);
-	long bufsize;
 	uint16_t tcpopt = be16toh(opt);
-	struct flowc_tx_params ftxp;
 
 	INP_WLOCK_ASSERT(inp);
 	KASSERT(tp->t_state == TCPS_SYN_SENT ||
@@ -379,7 +376,7 @@ make_established(struct toepcb *toep, uint32_t iss, ui
 
 	tp->irs = irs;
 	tcp_rcvseqinit(tp);
-	tp->rcv_wnd = (u_int)toep->opt0_rcv_bufsize << 10;
+	tp->rcv_wnd = (u_int)toep->params.opt0_bufsize << 10;
 	tp->rcv_adv += tp->rcv_wnd;
 	tp->last_ack_sent = tp->rcv_nxt;
 
@@ -390,20 +387,8 @@ make_established(struct toepcb *toep, uint32_t iss, ui
 	tp->snd_max = iss + 1;
 
 	assign_rxopt(tp, tcpopt);
+	send_flowc_wr(toep, tp);
 
-	SOCKBUF_LOCK(&so->so_snd);
-	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
-		bufsize = V_tcp_autosndbuf_max;
-	else
-		bufsize = sbspace(&so->so_snd);
-	SOCKBUF_UNLOCK(&so->so_snd);
-
-	ftxp.snd_nxt = tp->snd_nxt;
-	ftxp.rcv_nxt = tp->rcv_nxt;
-	ftxp.snd_space = bufsize;
-	ftxp.mss = toep->emss;
-	send_flowc_wr(toep, &ftxp);
-
 	soisconnected(so);
 }
 
@@ -459,7 +444,7 @@ t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
 	SOCKBUF_LOCK_ASSERT(sb);
 
 	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
-	if (toep->ulp_mode == ULP_MODE_TLS) {
+	if (ulp_mode(toep) == ULP_MODE_TLS) {
 		if (toep->tls.rcv_over >= rx_credits) {
 			toep->tls.rcv_over -= rx_credits;
 			rx_credits = 0;
@@ -578,7 +563,7 @@ max_dsgl_nsegs(int tx_credits)
 
 static inline void
 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
-    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
+    unsigned int plen, uint8_t credits, int shove, int ulp_submode)
 {
 	struct fw_ofld_tx_data_wr *txwr = dst;
 
@@ -586,20 +571,18 @@ write_tx_wr(void *dst, struct toepcb *toep, unsigned i
 	    V_FW_WR_IMMDLEN(immdlen));
 	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
 	    V_FW_WR_LEN16(credits));
-	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
+	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ulp_mode(toep)) |
 	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
 	txwr->plen = htobe32(plen);
 
-	if (txalign > 0) {
-		struct tcpcb *tp = intotcpcb(toep->inp);
-
-		if (plen < 2 * toep->emss)
+	if (toep->params.tx_align > 0) {
+		if (plen < 2 * toep->params.emss)
 			txwr->lsodisable_to_flags |=
 			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
 		else
 			txwr->lsodisable_to_flags |=
 			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
-				(tp->t_flags & TF_NODELAY ? 0 :
+				(toep->params.nagle == 0 ? 0 :
 				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
 	}
 }
@@ -694,11 +677,11 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep
 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
 
-	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
-	    toep->ulp_mode == ULP_MODE_TCPDDP ||
-	    toep->ulp_mode == ULP_MODE_TLS ||
-	    toep->ulp_mode == ULP_MODE_RDMA,
-	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
+	KASSERT(ulp_mode(toep) == ULP_MODE_NONE ||
+	    ulp_mode(toep) == ULP_MODE_TCPDDP ||
+	    ulp_mode(toep) == ULP_MODE_TLS ||
+	    ulp_mode(toep) == ULP_MODE_RDMA,
+	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
 
 #ifdef VERBOSE_TRACES
 	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
@@ -837,8 +820,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep
 			}
 			txwr = wrtod(wr);
 			credits = howmany(wr->wr_len, 16);
-			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
-			    sc->tt.tx_align);
+			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0);
 			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
 			nsegs = 0;
 		} else {
@@ -856,8 +838,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep
 			}
 			txwr = wrtod(wr);
 			credits = howmany(wr_len, 16);
-			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
-			    sc->tt.tx_align);
+			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0);
 			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
 			    max_nsegs_1mbuf);
 			if (wr_len & 0xf) {
@@ -877,7 +858,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep
 		    toep->tx_nocompl >= toep->tx_total / 4)
 			compl = 1;
 
-		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
+		if (compl || ulp_mode(toep) == ULP_MODE_RDMA) {
 			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
 			toep->tx_nocompl = 0;
 			toep->plen_nocompl = 0;
@@ -951,8 +932,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, 
 	INP_WLOCK_ASSERT(inp);
 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
-	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
-	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
+	KASSERT(ulp_mode(toep) == ULP_MODE_ISCSI,
+	    ("%s: ulp_mode %u for toep %p", __func__, ulp_mode(toep), toep));
 
 	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
 		return;
@@ -1035,7 +1016,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, 
 			txwr = wrtod(wr);
 			credits = howmany(wr->wr_len, 16);
 			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
-			    shove, ulp_submode, sc->tt.tx_align);
+			    shove, ulp_submode);
 			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
 			nsegs = 0;
 		} else {
@@ -1053,7 +1034,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, 
 			txwr = wrtod(wr);
 			credits = howmany(wr_len, 16);
 			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
-			    shove, ulp_submode, sc->tt.tx_align);
+			    shove, ulp_submode);
 			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
 			    max_nsegs_1mbuf);
 			if (wr_len & 0xf) {
@@ -1119,7 +1100,7 @@ t4_tod_output(struct toedev *tod, struct tcpcb *tp)
 	    ("%s: inp %p dropped.", __func__, inp));
 	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
 
-	if (toep->ulp_mode == ULP_MODE_ISCSI)
+	if (ulp_mode(toep) == ULP_MODE_ISCSI)
 		t4_push_pdus(sc, toep, 0);
 	else if (tls_tx_key(toep))
 		t4_push_tls_records(sc, toep, 0);
@@ -1145,7 +1126,7 @@ t4_send_fin(struct toedev *tod, struct tcpcb *tp)
 
 	toep->flags |= TPF_SEND_FIN;
 	if (tp->t_state >= TCPS_ESTABLISHED) {
-		if (toep->ulp_mode == ULP_MODE_ISCSI)
+		if (ulp_mode(toep) == ULP_MODE_ISCSI)
 			t4_push_pdus(sc, toep, 0);
 		else if (tls_tx_key(toep))
 			t4_push_tls_records(sc, toep, 0);
@@ -1232,7 +1213,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_head
 
 	so = inp->inp_socket;
 	socantrcvmore(so);
-	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
+	if (ulp_mode(toep) == ULP_MODE_TCPDDP) {
 		DDP_LOCK(toep);
 		if (__predict_false(toep->ddp.flags &
 		    (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)))
@@ -1240,7 +1221,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_head
 		DDP_UNLOCK(toep);
 	}
 
-	if (toep->ulp_mode != ULP_MODE_RDMA) {
+	if (ulp_mode(toep) != ULP_MODE_RDMA) {
 		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
 	    		("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
 	    		be32toh(cpl->rcv_nxt)));
@@ -1551,14 +1532,14 @@ do_rx_data(struct sge_iq *iq, const struct rss_header 
 
 	tp->rcv_nxt += len;
 	if (tp->rcv_wnd < len) {
-		KASSERT(toep->ulp_mode == ULP_MODE_RDMA,
+		KASSERT(ulp_mode(toep) == ULP_MODE_RDMA,
 				("%s: negative window size", __func__));
 	}
 
 	tp->rcv_wnd -= len;
 	tp->t_rcvtime = ticks;
 
-	if (toep->ulp_mode == ULP_MODE_TCPDDP)
+	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
 		DDP_LOCK(toep);
 	so = inp_inpcbtosocket(inp);
 	sb = &so->so_rcv;
@@ -1569,7 +1550,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header 
 		    __func__, tid, len);
 		m_freem(m);
 		SOCKBUF_UNLOCK(sb);
-		if (toep->ulp_mode == ULP_MODE_TCPDDP)
+		if (ulp_mode(toep) == ULP_MODE_TCPDDP)
 			DDP_UNLOCK(toep);
 		INP_WUNLOCK(inp);
 
@@ -1600,7 +1581,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header 
 			sb->sb_flags &= ~SB_AUTOSIZE;
 	}
 
-	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
+	if (ulp_mode(toep) == ULP_MODE_TCPDDP) {
 		int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off;
 
 		if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0)
@@ -1643,7 +1624,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header 
 		tp->rcv_adv += rx_credits;
 	}
 
-	if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 &&
+	if (ulp_mode(toep) == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 &&
 	    sbavail(sb) != 0) {
 		CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__,
 		    tid);
@@ -1651,7 +1632,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header 
 	}
 	sorwakeup_locked(so);
 	SOCKBUF_UNLOCK_ASSERT(sb);
-	if (toep->ulp_mode == ULP_MODE_TCPDDP)
+	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
 		DDP_UNLOCK(toep);
 
 	INP_WUNLOCK(inp);
@@ -1761,7 +1742,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header 
 #endif
 		toep->flags &= ~TPF_TX_SUSPENDED;
 		CURVNET_SET(toep->vnet);
-		if (toep->ulp_mode == ULP_MODE_ISCSI)
+		if (ulp_mode(toep) == ULP_MODE_ISCSI)
 			t4_push_pdus(sc, toep, plen);
 		else if (tls_tx_key(toep))
 			t4_push_tls_records(sc, toep, plen);
@@ -1774,7 +1755,7 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header 
 
 		SOCKBUF_LOCK(sb);
 		sbu = sbused(sb);
-		if (toep->ulp_mode == ULP_MODE_ISCSI) {
+		if (ulp_mode(toep) == ULP_MODE_ISCSI) {
 
 			if (__predict_false(sbu > 0)) {
 				/*

Modified: head/sys/dev/cxgbe/tom/t4_ddp.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_ddp.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/tom/t4_ddp.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -767,7 +767,7 @@ do_rx_data_ddp(struct sge_iq *iq, const struct rss_hea
 		    __func__, vld, tid, toep);
 	}
 
-	if (toep->ulp_mode == ULP_MODE_ISCSI) {
+	if (ulp_mode(toep) == ULP_MODE_ISCSI) {
 		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
 		return (0);
 	}

Modified: head/sys/dev/cxgbe/tom/t4_listen.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_listen.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/tom/t4_listen.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -348,7 +348,7 @@ send_reset_synqe(struct toedev *tod, struct synq_entry
 	struct ifnet *ifp = m->m_pkthdr.rcvif;
 	struct vi_info *vi = ifp->if_softc;
 	struct port_info *pi = vi->pi;
-	struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+	struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx];
 	struct wrqe *wr;
 	struct fw_flowc_wr *flowc;
 	struct cpl_abort_req *req;
@@ -368,8 +368,8 @@ send_reset_synqe(struct toedev *tod, struct synq_entry
 		return;	/* abort already in progress */
 	synqe->flags |= TPF_ABORT_SHUTDOWN;
 
-	ofld_txq = &sc->sge.ofld_txq[synqe->txqid];
-	ofld_rxq = &sc->sge.ofld_rxq[synqe->rxqid];
+	ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx];
+	ofld_rxq = &sc->sge.ofld_rxq[synqe->params.rxq_idx];
 
 	/* The wrqe will have two WRs - a flowc followed by an abort_req */
 	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
@@ -836,7 +836,7 @@ done_with_synqe(struct adapter *sc, struct synq_entry 
 {
 	struct listen_ctx *lctx = synqe->lctx;
 	struct inpcb *inp = lctx->inp;
-	struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+	struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx];
 	int ntids;
 
 	INP_WLOCK_ASSERT(inp);
@@ -887,7 +887,7 @@ do_abort_req_synqe(struct sge_iq *iq, const struct rss
 
 	INP_WLOCK(inp);
 
-	ofld_txq = &sc->sge.ofld_txq[synqe->txqid];
+	ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx];
 
 	/*
 	 * If we'd initiated an abort earlier the reply to it is responsible for
@@ -962,28 +962,6 @@ t4_offload_socket(struct toedev *tod, void *arg, struc
 	synqe->flags |= TPF_SYNQE_EXPANDED;
 }
 
-static inline void
-save_qids_in_synqe(struct synq_entry *synqe, struct vi_info *vi,
-    struct offload_settings *s)
-{
-	uint32_t txqid, rxqid;
-
-	if (s->txq >= 0 && s->txq < vi->nofldtxq)
-		txqid = s->txq;
-	else
-		txqid = arc4random() % vi->nofldtxq;
-	txqid += vi->first_ofld_txq;
-
-	if (s->rxq >= 0 && s->rxq < vi->nofldrxq)
-		rxqid = s->rxq;
-	else
-		rxqid = arc4random() % vi->nofldrxq;
-	rxqid += vi->first_ofld_rxq;
-
-	synqe->txqid = txqid;
-	synqe->rxqid = rxqid;
-}
-
 static void
 t4opt_to_tcpopt(const struct tcp_options *t4opt, struct tcpopt *to)
 {
@@ -1006,95 +984,6 @@ t4opt_to_tcpopt(const struct tcp_options *t4opt, struc
 		to->to_flags |= TOF_SACKPERM;
 }
 
-/*
- * Options2 for passive open.
- */
-static uint32_t
-calc_opt2p(struct adapter *sc, struct port_info *pi, int rxqid,
-	const struct tcp_options *tcpopt, struct tcphdr *th, int ulp_mode,
-	struct cc_algo *cc, const struct offload_settings *s)
-{
-	struct sge_ofld_rxq *ofld_rxq = &sc->sge.ofld_rxq[rxqid];
-	uint32_t opt2 = 0;
-
-	/*
-	 * rx flow control, rx coalesce, congestion control, and tx pace are all
-	 * explicitly set by the driver.  On T5+ the ISS is also set by the
-	 * driver to the value picked by the kernel.
-	 */
-	if (is_t4(sc)) {
-		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
-		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
-	} else {
-		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
-		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
-	}
-
-	if (tcpopt->sack && (s->sack > 0 || (s->sack < 0 && V_tcp_do_rfc1323)))
-		opt2 |= F_SACK_EN;
-
-	if (tcpopt->tstamp &&
-	    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
-		opt2 |= F_TSTAMPS_EN;
-
-	if (tcpopt->wsf < 15 && V_tcp_do_rfc1323)
-		opt2 |= F_WND_SCALE_EN;
-
-	if (th->th_flags & (TH_ECE | TH_CWR) &&
-	    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
-		opt2 |= F_CCTRL_ECN;
-
-	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */
-
-	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
-
-	/* These defaults are subject to ULP specific fixups later. */
-	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
-
-	opt2 |= V_PACE(0);
-
-	if (s->cong_algo >= 0)
-		opt2 |= V_CONG_CNTRL(s->cong_algo);
-	else if (sc->tt.cong_algorithm >= 0)
-		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
-	else {
-		if (strcasecmp(cc->name, "reno") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
-		else if (strcasecmp(cc->name, "tahoe") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
-		if (strcasecmp(cc->name, "newreno") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
-		if (strcasecmp(cc->name, "highspeed") == 0)
-			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
-		else {
-			/*
-			 * Use newreno in case the algorithm selected by the
-			 * host stack is not supported by the hardware.
-			 */
-			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
-		}
-	}
-
-	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
-		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
-
-	/* Note that ofld_rxq is already set according to s->rxq. */
-	opt2 |= F_RSS_QUEUE_VALID;
-	opt2 |= V_RSS_QUEUE(ofld_rxq->iq.abs_id);
-
-#ifdef USE_DDP_RX_FLOW_CONTROL
-	if (ulp_mode == ULP_MODE_TCPDDP)
-		opt2 |= F_RX_FC_DDP;
-#endif
-
-	if (ulp_mode == ULP_MODE_TLS) {
-		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
-		opt2 |= F_RX_FC_DISABLE;
-	}
-
-	return (htobe32(opt2));
-}
-
 static void
 pass_accept_req_to_protohdrs(struct adapter *sc, const struct mbuf *m,
     struct in_conninfo *inc, struct tcphdr *th)
@@ -1189,7 +1078,7 @@ send_synack(struct adapter *sc, struct synq_entry *syn
 {
 	struct wrqe *wr;
 	struct cpl_pass_accept_rpl *rpl;
-	struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+	struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx];
 
 	wr = alloc_wrqe(is_t4(sc) ? sizeof(struct cpl_pass_accept_rpl) :
 	    sizeof(struct cpl_t5_pass_accept_rpl), &sc->sge.ctrlq[0]);
@@ -1385,6 +1274,9 @@ found:
 	}
 	atomic_store_int(&synqe->ok_to_respond, 0);
 
+	init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx,
+	    &synqe->params);
+
 	/*
 	 * If all goes well t4_syncache_respond will get called during
 	 * syncache_add.  Note that syncache_add releases the pcb lock.
@@ -1395,27 +1287,12 @@ found:
 	if (atomic_load_int(&synqe->ok_to_respond) > 0) {
 		uint64_t opt0;
 		uint32_t opt2;
-		u_int wnd;
-		int rscale, mtu_idx, rx_credits;
 
-		mtu_idx = find_best_mtu_idx(sc, &inc, &settings);
-		rscale = cpl->tcpopt.wsf && V_tcp_do_rfc1323 ?  select_rcv_wscale() : 0;
-		wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
-		wnd = min(wnd, MAX_RCV_WND);
-		rx_credits = min(wnd >> 10, M_RCV_BUFSIZ);
+		opt0 = calc_options0(vi, &synqe->params);
+		opt2 = calc_options2(vi, &synqe->params);
 
-		save_qids_in_synqe(synqe, vi, &settings);
-		synqe->ulp_mode = select_ulp_mode(so, sc, &settings);
-
-		opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits,
-		    synqe->ulp_mode, &settings);
-		opt2 = calc_opt2p(sc, pi, synqe->rxqid, &cpl->tcpopt, &th,
-		    synqe->ulp_mode, CC_ALGO(intotcpcb(inp)), &settings);
-
 		insert_tid(sc, tid, synqe, ntids);
 		synqe->tid = tid;
-		synqe->l2e_idx = e->idx;
-		synqe->rcv_bufsize = rx_credits;
 		synqe->syn = m;
 		m = NULL;
 
@@ -1427,8 +1304,8 @@ found:
 		}
 
 		CTR6(KTR_CXGBE,
-		    "%s: stid %u, tid %u, lctx %p, synqe %p, mode %d, SYNACK",
-		    __func__, stid, tid, lctx, synqe, synqe->ulp_mode);
+		    "%s: stid %u, tid %u, synqe %p, opt0 %#016lx, opt2 %#08x",
+		    __func__, stid, tid, synqe, be64toh(opt0), be32toh(opt2));
 	} else
 		REJECT_PASS_ACCEPT_REQ(false);
 
@@ -1540,18 +1417,19 @@ reset:
 		return (0);
 	}
 
-	KASSERT(synqe->rxqid == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0],
+	KASSERT(synqe->params.rxq_idx == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0],
 	    ("%s: CPL arrived on unexpected rxq.  %d %d", __func__,
-	    synqe->rxqid, (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0])));
+	    synqe->params.rxq_idx,
+	    (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0])));
 
-	toep = alloc_toepcb(vi, synqe->txqid, synqe->rxqid, M_NOWAIT);
+	toep = alloc_toepcb(vi, M_NOWAIT);
 	if (toep == NULL)
 		goto reset;
 	toep->tid = tid;
-	toep->l2te = &sc->l2t->l2tab[synqe->l2e_idx];
+	toep->l2te = &sc->l2t->l2tab[synqe->params.l2t_idx];
 	toep->vnet = lctx->vnet;
-	set_ulp_mode(toep, synqe->ulp_mode);
-	toep->opt0_rcv_bufsize = synqe->rcv_bufsize;
+	bcopy(&synqe->params, &toep->params, sizeof(toep->params));
+	init_toepcb(vi, toep);
 
 	MPASS(be32toh(cpl->snd_isn) - 1 == synqe->iss);
 	MPASS(be32toh(cpl->rcv_isn) - 1 == synqe->irs);

Modified: head/sys/dev/cxgbe/tom/t4_tls.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_tls.c	Tue Aug 27 04:16:42 2019	(r351539)
+++ head/sys/dev/cxgbe/tom/t4_tls.c	Tue Aug 27 04:19:40 2019	(r351540)
@@ -590,7 +590,7 @@ program_key_context(struct tcpcb *tp, struct toepcb *t
 	    "KEY_WRITE_TX", uk_ctx->proto_ver);
 
 	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
-	    toep->ulp_mode != ULP_MODE_TLS)
+	    ulp_mode(toep) != ULP_MODE_TLS)
 		return (EOPNOTSUPP);
 
 	/* Don't copy the 'tx' and 'rx' fields. */
@@ -788,7 +788,7 @@ t4_ctloutput_tls(struct socket *so, struct sockopt *so
 			INP_WUNLOCK(inp);
 			break;
 		case TCP_TLSOM_CLR_TLS_TOM:
-			if (toep->ulp_mode == ULP_MODE_TLS) {
+			if (ulp_mode(toep) == ULP_MODE_TLS) {
 				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
 				    __func__, toep->tid);
 				tls_clr_ofld_mode(toep);
@@ -797,7 +797,7 @@ t4_ctloutput_tls(struct socket *so, struct sockopt *so
 			INP_WUNLOCK(inp);
 			break;
 		case TCP_TLSOM_CLR_QUIES:
-			if (toep->ulp_mode == ULP_MODE_TLS) {
+			if (ulp_mode(toep) == ULP_MODE_TLS) {
 				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
 				    __func__, toep->tid);
 				tls_clr_quiesce(toep);
@@ -820,7 +820,7 @@ t4_ctloutput_tls(struct socket *so, struct sockopt *so
 			 */
 			optval = TLS_TOM_NONE;
 			if (can_tls_offload(td_adapter(toep->td))) {
-				switch (toep->ulp_mode) {
+				switch (ulp_mode(toep)) {
 				case ULP_MODE_NONE:
 				case ULP_MODE_TCPDDP:
 					optval = TLS_TOM_TXONLY;
@@ -853,7 +853,7 @@ tls_init_toep(struct toepcb *toep)
 	tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
 	tls_ofld->rx_key_addr = -1;
 	tls_ofld->tx_key_addr = -1;
-	if (toep->ulp_mode == ULP_MODE_TLS)
+	if (ulp_mode(toep) == ULP_MODE_TLS)
 		callout_init_mtx(&tls_ofld->handshake_timer,
 		    &tls_handshake_lock, 0);
 }
@@ -882,7 +882,7 @@ void
 tls_uninit_toep(struct toepcb *toep)
 {
 

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***