Date:      Sat, 11 Aug 2018 21:10:08 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r337659 - in head/sys/dev/cxgbe: . iw_cxgbe
Message-ID:  <201808112110.w7BLA8qA065531@repo.freebsd.org>

Author: np
Date: Sat Aug 11 21:10:08 2018
New Revision: 337659
URL: https://svnweb.freebsd.org/changeset/base/337659

Log:
  cxgbe(4): Move all control queues to the adapter.
  
  There used to be one control queue per adapter (the mgmtq) that was
  initialized during adapter init and one per port that was initialized
  later during port init.  This change moves all the control queues (one
  per port/channel) to the adapter so that they are initialized during
  adapter init and are available before any port is up.  This allows the
  driver to issue ctrlq work requests over any channel without having to
  bring up any port.
  
  MFH:		2 weeks
  Sponsored by:	Chelsio Communications
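
A rough sketch (not from the commit) of the pattern this change enables: a
work request can now be staged on any channel's control queue straight from
the adapter softc, with no port brought up first.  The helper name, the
"chan" parameter, and the wr_len argument below are hypothetical; only the
start_wrq_wr()/commit_wrq_wr() calls mirror the call sites in the diff.

  /*
   * Hypothetical helper: reserve space on the control queue of the given
   * channel, let the caller build the work request, and hand it to the
   * hardware.  After this change sc->sge.ctrlq[] is populated during
   * adapter init, so this works before any port is up.
   */
  static int
  send_ctrl_wr(struct adapter *sc, int chan, size_t wr_len)
  {
  	struct wrq_cookie cookie;
  	void *wr;

  	wr = start_wrq_wr(&sc->sge.ctrlq[chan], howmany(wr_len, 16),
  	    &cookie);
  	if (wr == NULL)
  		return (ENOMEM);
  	/* ... build the actual work request in *wr here ... */
  	commit_wrq_wr(&sc->sge.ctrlq[chan], wr, &cookie);

  	return (0);
  }

Note that the committed code keeps using ctrlq[0] at the former mgmtq call
sites; issuing over other channels is what the new layout makes possible.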

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/iw_cxgbe/cq.c
  head/sys/dev/cxgbe/iw_cxgbe/mem.c
  head/sys/dev/cxgbe/iw_cxgbe/qp.c
  head/sys/dev/cxgbe/t4_filter.c
  head/sys/dev/cxgbe/t4_l2t.c
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_sge.c
  head/sys/dev/cxgbe/t4_smt.c
  head/sys/dev/cxgbe/t4_vf.c

Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/adapter.h	Sat Aug 11 21:10:08 2018	(r337659)
@@ -100,7 +100,7 @@ enum {
 	EQ_ESIZE = 64,
 
 	/* Default queue sizes for all kinds of egress queues */
-	CTRL_EQ_QSIZE = 128,
+	CTRL_EQ_QSIZE = 1024,
 	TX_EQ_QSIZE = 1024,
 
 #if MJUMPAGESIZE != MCLBYTES
@@ -738,7 +738,6 @@ struct sge {
 	int neq;	/* total # of egress queues */
 
 	struct sge_iq fwq;	/* Firmware event queue */
-	struct sge_wrq mgmtq;	/* Management queue (control queue) */
 	struct sge_wrq *ctrlq;	/* Control queues */
 	struct sge_txq *txq;	/* NIC tx queues */
 	struct sge_rxq *rxq;	/* NIC rx queues */

Modified: head/sys/dev/cxgbe/iw_cxgbe/cq.c
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/cq.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/iw_cxgbe/cq.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -63,7 +63,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t
 	struct wrqe *wr;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
                 if (wr == NULL)
                         return (0);
         res_wr = wrtod(wr);
@@ -133,7 +133,7 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL)
         	return (0);
         res_wr = wrtod(wr);

Modified: head/sys/dev/cxgbe/iw_cxgbe/mem.c
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/mem.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/iw_cxgbe/mem.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -82,7 +82,7 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u3
 		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
 				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 		if (wr == NULL)
 			return (0);
 		ulpmc = wrtod(wr);

Modified: head/sys/dev/cxgbe/iw_cxgbe/qp.c
==============================================================================
--- head/sys/dev/cxgbe/iw_cxgbe/qp.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/iw_cxgbe/qp.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -236,7 +236,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL) {
 		ret = -ENOMEM;
 		goto free_rq_dma;

Modified: head/sys/dev/cxgbe/t4_filter.c
==============================================================================
--- head/sys/dev/cxgbe/t4_filter.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_filter.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -400,7 +400,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t
 			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
 		else
 			len16 = howmany(sizeof(struct fw_filter_wr), 16);
-		fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
+		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
 		if (__predict_false(fwr == NULL))
 			rc = ENOMEM;
 		else {
@@ -519,7 +519,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t
 		fwr->newfport = htobe16(f->fs.nat_sport);
 		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
 	}
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 
 	/* Wait for response. */
 	mtx_lock(&sc->tids.ftid_lock);
@@ -824,7 +824,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t
 		goto done;
 	}
 	MPASS(f->tid == tid_base + t->idx);
-	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
+	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
 	if (fwr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -833,7 +833,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t
 	bzero(fwr, sizeof (*fwr));
 	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {
@@ -901,7 +901,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t 
 	struct wrq_cookie cookie;
 	struct cpl_set_tcb_field *req;
 
-	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
+	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
 	if (req == NULL)
 		return (ENOMEM);
 	bzero(req, sizeof(*req));
@@ -914,7 +914,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t 
 	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
 	req->mask = htobe64(mask);
 	req->val = htobe64(val);
-	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
 
 	return (0);
 }
@@ -1044,7 +1044,7 @@ t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct r
 		f->tid = act_open_rpl_status_to_errno(status);
 		f->valid = 0;
 		if (act_open_has_tid(status))
-			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
+			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
 		free_filter_resources(f);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
@@ -1081,7 +1081,7 @@ t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct 
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1112,7 +1112,7 @@ t4_del_hashfilter_rpl(struct sge_iq *iq, const struct 
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1374,7 +1374,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t
 	}
 	MPASS(atid >= 0);
 
-	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
 	    &cookie);
 	if (wr == NULL) {
 		free_atid(sc, atid);
@@ -1394,7 +1394,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t
 	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
 	f->pending = 1;
 	f->tid = -1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 
 	for (;;) {
 		MPASS(f->locked);
@@ -1571,7 +1571,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t
 		rc = EBUSY;
 		goto done;
 	}
-	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
 	if (wr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -1580,7 +1580,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t
 	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
 	f->locked = 1;
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {

Modified: head/sys/dev/cxgbe/t4_l2t.c
==============================================================================
--- head/sys/dev/cxgbe/t4_l2t.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_l2t.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -182,7 +182,7 @@ t4_l2t_set_switching(struct adapter *sc, struct l2t_en
 
 	e->vlan = vlan;
 	e->lport = port;
-	e->wrq = &sc->sge.mgmtq;
+	e->wrq = &sc->sge.ctrlq[0];
 	e->iqid = sc->sge.fwq.abs_id;
 	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
 	mtx_lock(&e->lock);

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_main.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -1052,7 +1052,7 @@ t4_attach(device_t dev)
 		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
 	}
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += nports;		/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
 	if (is_offload(sc) || is_ethoffload(sc)) {

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_sge.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -224,8 +224,8 @@ static void add_fl_sysctls(struct adapter *, struct sy
     struct sysctl_oid *, struct sge_fl *);
 static int alloc_fwq(struct adapter *);
 static int free_fwq(struct adapter *);
-static int alloc_mgmtq(struct adapter *);
-static int free_mgmtq(struct adapter *);
+static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
+    struct sysctl_oid *);
 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
     struct sysctl_oid *);
 static int free_rxq(struct vi_info *, struct sge_rxq *);
@@ -1009,7 +1009,8 @@ t4_destroy_dma_tag(struct adapter *sc)
 }
 
 /*
- * Allocate and initialize the firmware event queue and the management queue.
+ * Allocate and initialize the firmware event queue, control queues, and special
+ * purpose rx queues owned by the adapter.
  *
  * Returns errno on failure.  Resources allocated up to that point may still be
  * allocated.  Caller is responsible for cleanup in case this function fails.
@@ -1017,7 +1018,9 @@ t4_destroy_dma_tag(struct adapter *sc)
 int
 t4_setup_adapter_queues(struct adapter *sc)
 {
-	int rc;
+	struct sysctl_oid *oid;
+	struct sysctl_oid_list *children;
+	int rc, i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1032,12 +1035,31 @@ t4_setup_adapter_queues(struct adapter *sc)
 		return (rc);
 
 	/*
-	 * Management queue.  This is just a control queue that uses the fwq as
-	 * its associated iq.
+	 * That's all for the VF driver.
 	 */
-	if (!(sc->flags & IS_VF))
-		rc = alloc_mgmtq(sc);
+	if (sc->flags & IS_VF)
+		return (rc);
 
+	oid = device_get_sysctl_tree(sc->dev);
+	children = SYSCTL_CHILDREN(oid);
+
+	/*
+	 * XXX: General purpose rx queues, one per port.
+	 */
+
+	/*
+	 * Control queues, one per port.
+	 */
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
+	    CTLFLAG_RD, NULL, "control queues");
+	for_each_port(sc, i) {
+		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];
+
+		rc = alloc_ctrlq(sc, ctrlq, i, oid);
+		if (rc != 0)
+			return (rc);
+	}
+
 	return (rc);
 }
 
@@ -1047,6 +1069,7 @@ t4_setup_adapter_queues(struct adapter *sc)
 int
 t4_teardown_adapter_queues(struct adapter *sc)
 {
+	int i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1056,7 +1079,8 @@ t4_teardown_adapter_queues(struct adapter *sc)
 		sc->flags &= ~ADAP_SYSCTL_CTX;
 	}
 
-	free_mgmtq(sc);
+	for_each_port(sc, i)
+		free_wrq(sc, &sc->sge.ctrlq[i]);
 	free_fwq(sc);
 
 	return (0);
@@ -1092,7 +1116,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 	int rc = 0, i, intr_idx, iqidx;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
-	struct sge_wrq *ctrlq;
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 #endif
@@ -1239,20 +1262,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 			goto done;
 	}
 #endif
-
-	/*
-	 * Finally, the control queue.
-	 */
-	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
-		goto done;
-	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
-	    NULL, "ctrl queue");
-	ctrlq = &sc->sge.ctrlq[pi->port_id];
-	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
-	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
-	    sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
-	rc = alloc_wrq(sc, vi, ctrlq, oid);
-
 done:
 	if (rc)
 		t4_teardown_vi_queues(vi);
@@ -1267,16 +1276,16 @@ int
 t4_teardown_vi_queues(struct vi_info *vi)
 {
 	int i;
-	struct port_info *pi = vi->pi;
-	struct adapter *sc = pi->adapter;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
-#ifdef TCP_OFFLOAD
-	struct sge_ofld_rxq *ofld_rxq;
-#endif
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+	struct port_info *pi = vi->pi;
+	struct adapter *sc = pi->adapter;
 	struct sge_wrq *ofld_txq;
 #endif
+#ifdef TCP_OFFLOAD
+	struct sge_ofld_rxq *ofld_rxq;
+#endif
 #ifdef DEV_NETMAP
 	struct sge_nm_rxq *nm_rxq;
 	struct sge_nm_txq *nm_txq;
@@ -1305,9 +1314,6 @@ t4_teardown_vi_queues(struct vi_info *vi)
 	 * (for egress updates, etc.).
 	 */
 
-	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
-		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
-
 	for_each_txq(vi, i, txq) {
 		free_txq(vi, txq);
 	}
@@ -3257,35 +3263,25 @@ free_fwq(struct adapter *sc)
 }
 
 static int
-alloc_mgmtq(struct adapter *sc)
+alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
+    struct sysctl_oid *oid)
 {
 	int rc;
-	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
 	char name[16];
-	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
-	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+	struct sysctl_oid_list *children;
 
-	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
-	    NULL, "management queue");
-
-	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
-	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
+	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
+	    idx);
+	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
 	    sc->sge.fwq.cntxt_id, name);
-	rc = alloc_wrq(sc, NULL, mgmtq, oid);
-	if (rc != 0) {
-		device_printf(sc->dev,
-		    "failed to create management queue: %d\n", rc);
-		return (rc);
-	}
 
-	return (0);
-}
+	children = SYSCTL_CHILDREN(oid);
+	snprintf(name, sizeof(name), "%d", idx);
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	    NULL, "ctrl queue");
+	rc = alloc_wrq(sc, NULL, ctrlq, oid);
 
-static int
-free_mgmtq(struct adapter *sc)
-{
-
-	return free_wrq(sc, &sc->sge.mgmtq);
+	return (rc);
 }
 
 int

Modified: head/sys/dev/cxgbe/t4_smt.c
==============================================================================
--- head/sys/dev/cxgbe/t4_smt.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_smt.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -210,7 +210,7 @@ t4_smt_set_switching(struct adapter *sc, struct smt_en
 	if (atomic_load_acq_int(&e->refcnt) == 1) {
 		/* Setup the entry for the first time */
 		mtx_lock(&e->lock);
-		e->wrq = &sc->sge.mgmtq;
+		e->wrq = &sc->sge.ctrlq[0];
 		e->iqid = sc->sge.fwq.abs_id;
 		e->pfvf =  pfvf;
 		e->state = SMT_STATE_SWITCHING;

Modified: head/sys/dev/cxgbe/t4_vf.c
==============================================================================
--- head/sys/dev/cxgbe/t4_vf.c	Sat Aug 11 20:49:19 2018	(r337658)
+++ head/sys/dev/cxgbe/t4_vf.c	Sat Aug 11 21:10:08 2018	(r337659)
@@ -662,7 +662,7 @@ t4vf_attach(device_t dev)
 	s->nrxq = sc->params.nports * iaq.nrxq;
 	s->ntxq = sc->params.nports * iaq.ntxq;
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += sc->params.nports;	/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 
 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,


