Date:      Mon, 29 Apr 2019 04:42:18 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r346876 - in stable/11/sys/dev/cxgbe: . iw_cxgbe
Message-ID:  <201904290442.x3T4gIB5006907@repo.freebsd.org>

Author: np
Date: Mon Apr 29 04:42:18 2019
New Revision: 346876
URL: https://svnweb.freebsd.org/changeset/base/346876

Log:
  MFC r337659:
  
  cxgbe(4): Move all control queues to the adapter.
  
  There used to be one control queue per adapter (the mgmtq) that was
  initialized during adapter init and one per port that was initialized
  later during port init.  This change moves all the control queues (one
  per port/channel) to the adapter so that they are initialized during
  adapter init and are available before any port is up.  This allows the
  driver to issue ctrlq work requests over any channel without having to
  bring up any port.
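  
  For illustration only (not part of the commit): the pattern of the change at
  the driver's call sites is sketched below.  Code that previously allocated a
  control work request against the single sc->sge.mgmtq now uses one of the
  per-port/channel control queues owned by the adapter.  The caller shown is
  hypothetical; alloc_wrqe(), wrtod(), and t4_wrq_tx() are existing cxgbe
  helpers, used here as in the diff.
  
  	/* Hypothetical call site, for illustration only. */
  	static int
  	send_ctrlq_example(struct adapter *sc, int wr_len)
  	{
  		struct wrqe *wr;
  
  		/* Old: wr = alloc_wrqe(wr_len, &sc->sge.mgmtq); */
  		/* New: pick a per-port/channel ctrlq (index 0 here, as the
  		 * iw_cxgbe, L2T, and SMT call sites in this diff do). */
  		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
  		if (wr == NULL)
  			return (ENOMEM);
  		/* Build the work request in place via wrtod(wr), then send. */
  		t4_wrq_tx(sc, wr);
  		return (0);
  	}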

Modified:
  stable/11/sys/dev/cxgbe/adapter.h
  stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c
  stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c
  stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c
  stable/11/sys/dev/cxgbe/t4_filter.c
  stable/11/sys/dev/cxgbe/t4_l2t.c
  stable/11/sys/dev/cxgbe/t4_main.c
  stable/11/sys/dev/cxgbe/t4_sge.c
  stable/11/sys/dev/cxgbe/t4_smt.c
  stable/11/sys/dev/cxgbe/t4_vf.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/11/sys/dev/cxgbe/adapter.h	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/adapter.h	Mon Apr 29 04:42:18 2019	(r346876)
@@ -101,7 +101,7 @@ enum {
 	EQ_ESIZE = 64,
 
 	/* Default queue sizes for all kinds of egress queues */
-	CTRL_EQ_QSIZE = 128,
+	CTRL_EQ_QSIZE = 1024,
 	TX_EQ_QSIZE = 1024,
 
 #if MJUMPAGESIZE != MCLBYTES
@@ -736,7 +736,6 @@ struct sge {
 	int neq;	/* total # of egress queues */
 
 	struct sge_iq fwq;	/* Firmware event queue */
-	struct sge_wrq mgmtq;	/* Management queue (control queue) */
 	struct sge_wrq *ctrlq;	/* Control queues */
 	struct sge_txq *txq;	/* NIC tx queues */
 	struct sge_rxq *rxq;	/* NIC rx queues */

Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/cq.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -61,7 +61,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t
 	struct wrqe *wr;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
                 if (wr == NULL)
                         return (0);
         res_wr = wrtod(wr);
@@ -131,7 +131,7 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL)
         	return (0);
         res_wr = wrtod(wr);

Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -80,7 +80,7 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u3
 		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
 				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 		if (wr == NULL)
 			return (0);
 		ulpmc = wrtod(wr);

Modified: stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -234,7 +234,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL) {
 		ret = -ENOMEM;
 		goto free_rq_dma;

Modified: stable/11/sys/dev/cxgbe/t4_filter.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_filter.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_filter.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -400,7 +400,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t
 			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
 		else
 			len16 = howmany(sizeof(struct fw_filter_wr), 16);
-		fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
+		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
 		if (__predict_false(fwr == NULL))
 			rc = ENOMEM;
 		else {
@@ -519,7 +519,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t
 		fwr->newfport = htobe16(f->fs.nat_sport);
 		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
 	}
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 
 	/* Wait for response. */
 	mtx_lock(&sc->tids.ftid_lock);
@@ -824,7 +824,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t
 		goto done;
 	}
 	MPASS(f->tid == tid_base + t->idx);
-	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
+	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
 	if (fwr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -833,7 +833,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t
 	bzero(fwr, sizeof (*fwr));
 	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {
@@ -901,7 +901,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t 
 	struct wrq_cookie cookie;
 	struct cpl_set_tcb_field *req;
 
-	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
+	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
 	if (req == NULL)
 		return (ENOMEM);
 	bzero(req, sizeof(*req));
@@ -914,7 +914,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t 
 	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
 	req->mask = htobe64(mask);
 	req->val = htobe64(val);
-	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
 
 	return (0);
 }
@@ -1044,7 +1044,7 @@ t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct r
 		f->tid = act_open_rpl_status_to_errno(status);
 		f->valid = 0;
 		if (act_open_has_tid(status))
-			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
+			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
 		free_filter_resources(f);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
@@ -1081,7 +1081,7 @@ t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct 
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1112,7 +1112,7 @@ t4_del_hashfilter_rpl(struct sge_iq *iq, const struct 
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1374,7 +1374,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t
 	}
 	MPASS(atid >= 0);
 
-	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
 	    &cookie);
 	if (wr == NULL) {
 		free_atid(sc, atid);
@@ -1394,7 +1394,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t
 	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
 	f->pending = 1;
 	f->tid = -1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 
 	for (;;) {
 		MPASS(f->locked);
@@ -1571,7 +1571,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t
 		rc = EBUSY;
 		goto done;
 	}
-	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
 	if (wr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -1580,7 +1580,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t
 	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
 	f->locked = 1;
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {

Modified: stable/11/sys/dev/cxgbe/t4_l2t.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_l2t.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_l2t.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -180,7 +180,7 @@ t4_l2t_set_switching(struct adapter *sc, struct l2t_en
 
 	e->vlan = vlan;
 	e->lport = port;
-	e->wrq = &sc->sge.mgmtq;
+	e->wrq = &sc->sge.ctrlq[0];
 	e->iqid = sc->sge.fwq.abs_id;
 	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
 	mtx_lock(&e->lock);

Modified: stable/11/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_main.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_main.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -1153,7 +1153,7 @@ t4_attach(device_t dev)
 		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
 	}
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += nports;		/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 #ifdef TCP_OFFLOAD
 	if (is_offload(sc)) {

Modified: stable/11/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_sge.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_sge.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -216,8 +216,8 @@ static void add_fl_sysctls(struct adapter *, struct sy
     struct sysctl_oid *, struct sge_fl *);
 static int alloc_fwq(struct adapter *);
 static int free_fwq(struct adapter *);
-static int alloc_mgmtq(struct adapter *);
-static int free_mgmtq(struct adapter *);
+static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
+    struct sysctl_oid *);
 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
     struct sysctl_oid *);
 static int free_rxq(struct vi_info *, struct sge_rxq *);
@@ -983,7 +983,8 @@ t4_destroy_dma_tag(struct adapter *sc)
 }
 
 /*
- * Allocate and initialize the firmware event queue and the management queue.
+ * Allocate and initialize the firmware event queue, control queues, and special
+ * purpose rx queues owned by the adapter.
  *
  * Returns errno on failure.  Resources allocated up to that point may still be
  * allocated.  Caller is responsible for cleanup in case this function fails.
@@ -991,7 +992,9 @@ t4_destroy_dma_tag(struct adapter *sc)
 int
 t4_setup_adapter_queues(struct adapter *sc)
 {
-	int rc;
+	struct sysctl_oid *oid;
+	struct sysctl_oid_list *children;
+	int rc, i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1006,12 +1009,31 @@ t4_setup_adapter_queues(struct adapter *sc)
 		return (rc);
 
 	/*
-	 * Management queue.  This is just a control queue that uses the fwq as
-	 * its associated iq.
+	 * That's all for the VF driver.
 	 */
-	if (!(sc->flags & IS_VF))
-		rc = alloc_mgmtq(sc);
+	if (sc->flags & IS_VF)
+		return (rc);
 
+	oid = device_get_sysctl_tree(sc->dev);
+	children = SYSCTL_CHILDREN(oid);
+
+	/*
+	 * XXX: General purpose rx queues, one per port.
+	 */
+
+	/*
+	 * Control queues, one per port.
+	 */
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
+	    CTLFLAG_RD, NULL, "control queues");
+	for_each_port(sc, i) {
+		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];
+
+		rc = alloc_ctrlq(sc, ctrlq, i, oid);
+		if (rc != 0)
+			return (rc);
+	}
+
 	return (rc);
 }
 
@@ -1021,6 +1043,7 @@ t4_setup_adapter_queues(struct adapter *sc)
 int
 t4_teardown_adapter_queues(struct adapter *sc)
 {
+	int i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1030,7 +1053,8 @@ t4_teardown_adapter_queues(struct adapter *sc)
 		sc->flags &= ~ADAP_SYSCTL_CTX;
 	}
 
-	free_mgmtq(sc);
+	for_each_port(sc, i)
+		free_wrq(sc, &sc->sge.ctrlq[i]);
 	free_fwq(sc);
 
 	return (0);
@@ -1066,7 +1090,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 	int rc = 0, i, intr_idx, iqidx;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
-	struct sge_wrq *ctrlq;
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 	struct sge_wrq *ofld_txq;
@@ -1205,20 +1228,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 			goto done;
 	}
 #endif
-
-	/*
-	 * Finally, the control queue.
-	 */
-	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
-		goto done;
-	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
-	    NULL, "ctrl queue");
-	ctrlq = &sc->sge.ctrlq[pi->port_id];
-	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
-	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
-	    sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
-	rc = alloc_wrq(sc, vi, ctrlq, oid);
-
 done:
 	if (rc)
 		t4_teardown_vi_queues(vi);
@@ -1233,13 +1242,15 @@ int
 t4_teardown_vi_queues(struct vi_info *vi)
 {
 	int i;
-	struct port_info *pi = vi->pi;
-	struct adapter *sc = pi->adapter;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+	struct port_info *pi = vi->pi;
+	struct adapter *sc = pi->adapter;
+	struct sge_wrq *ofld_txq;
+#endif
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
-	struct sge_wrq *ofld_txq;
 #endif
 #ifdef DEV_NETMAP
 	struct sge_nm_rxq *nm_rxq;
@@ -1269,9 +1280,6 @@ t4_teardown_vi_queues(struct vi_info *vi)
 	 * (for egress updates, etc.).
 	 */
 
-	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
-		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
-
 	for_each_txq(vi, i, txq) {
 		free_txq(vi, txq);
 	}
@@ -3098,35 +3106,25 @@ free_fwq(struct adapter *sc)
 }
 
 static int
-alloc_mgmtq(struct adapter *sc)
+alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
+    struct sysctl_oid *oid)
 {
 	int rc;
-	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
 	char name[16];
-	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
-	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+	struct sysctl_oid_list *children;
 
-	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
-	    NULL, "management queue");
-
-	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
-	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
+	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
+	    idx);
+	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
 	    sc->sge.fwq.cntxt_id, name);
-	rc = alloc_wrq(sc, NULL, mgmtq, oid);
-	if (rc != 0) {
-		device_printf(sc->dev,
-		    "failed to create management queue: %d\n", rc);
-		return (rc);
-	}
 
-	return (0);
-}
+	children = SYSCTL_CHILDREN(oid);
+	snprintf(name, sizeof(name), "%d", idx);
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	    NULL, "ctrl queue");
+	rc = alloc_wrq(sc, NULL, ctrlq, oid);
 
-static int
-free_mgmtq(struct adapter *sc)
-{
-
-	return free_wrq(sc, &sc->sge.mgmtq);
+	return (rc);
 }
 
 int

Modified: stable/11/sys/dev/cxgbe/t4_smt.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_smt.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_smt.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -210,7 +210,7 @@ t4_smt_set_switching(struct adapter *sc, struct smt_en
 	if (atomic_load_acq_int(&e->refcnt) == 1) {
 		/* Setup the entry for the first time */
 		mtx_lock(&e->lock);
-		e->wrq = &sc->sge.mgmtq;
+		e->wrq = &sc->sge.ctrlq[0];
 		e->iqid = sc->sge.fwq.abs_id;
 		e->pfvf =  pfvf;
 		e->state = SMT_STATE_SWITCHING;

Modified: stable/11/sys/dev/cxgbe/t4_vf.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_vf.c	Mon Apr 29 04:31:27 2019	(r346875)
+++ stable/11/sys/dev/cxgbe/t4_vf.c	Mon Apr 29 04:42:18 2019	(r346876)
@@ -662,7 +662,7 @@ t4vf_attach(device_t dev)
 	s->nrxq = sc->params.nports * iaq.nrxq;
 	s->ntxq = sc->params.nports * iaq.ntxq;
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += sc->params.nports;	/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 
 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
