Date:      Fri, 22 Dec 2017 19:10:19 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r327093 - head/sys/dev/cxgbe
Message-ID:  <201712221910.vBMJAJLj092570@repo.freebsd.org>

Author: np
Date: Fri Dec 22 19:10:19 2017
New Revision: 327093
URL: https://svnweb.freebsd.org/changeset/base/327093

Log:
  cxgbe(4): Do not forward interrupts to queues with freelists.  This
  leaves the firmware event queue (fwq) as the only queue that can take
  interrupts for others.
  
  This simplifies cfg_itype_and_nqueues and queue allocation in the driver
  at the cost of a rarely (if ever) used configuration.  It also allows
  service_iq to be split into two specialized variants in the future.
  
  MFC after:	2 months
  Sponsored by:	Chelsio Communications
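
The crux of the change is a single predicate: with only one interrupt vector
available, every ingress queue forwards its interrupts to the fwq; otherwise
each rxq (NIC, TOE, or netmap) gets a direct vector, and queue setup tells
alloc_iq_fl to forward by passing intr_idx = -1, which the t4_sge.c hunk
below translates into F_FW_IQ_CMD_IQANDST with the fwq's cntxt_id.  A minimal
standalone sketch of that convention (not driver code; the adapter struct is
a stand-in with only the one field the check needs):

/* Sketch only: models the post-r327093 intr_idx convention. */
#include <stdio.h>

struct adapter {
	int intr_count;		/* vectors obtained from the kernel */
};

/* Mirrors forwarding_intr_to_fwq() added to adapter.h. */
static int
forwarding_intr_to_fwq(struct adapter *sc)
{

	return (sc->intr_count == 1);
}

/*
 * Models the choice made while setting up VI queues: a non-negative
 * intr_idx is a direct vector, -1 means "forward to the fwq".
 */
static int
pick_intr_idx(struct adapter *sc, int next_vector)
{

	return (forwarding_intr_to_fwq(sc) ? -1 : next_vector);
}

int
main(void)
{
	struct adapter one = { .intr_count = 1 };
	struct adapter many = { .intr_count = 8 };

	printf("1 vector : intr_idx %d (forwarded to the fwq)\n",
	    pick_intr_idx(&one, 3));
	printf("8 vectors: intr_idx %d (direct interrupt)\n",
	    pick_intr_idx(&many, 3));
	return (0);
}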

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_netmap.c
  head/sys/dev/cxgbe/t4_sge.c
  head/sys/dev/cxgbe/t4_vf.c

Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h	Fri Dec 22 19:10:19 2017	(r327092)
+++ head/sys/dev/cxgbe/adapter.h	Fri Dec 22 19:10:19 2017	(r327093)
@@ -169,9 +169,6 @@ enum {
 	DOOMED		= (1 << 0),
 	VI_INIT_DONE	= (1 << 1),
 	VI_SYSCTL_CTX	= (1 << 2),
-	INTR_RXQ	= (1 << 4),	/* All NIC rxq's take interrupts */
-	INTR_OFLD_RXQ	= (1 << 5),	/* All TOE rxq's take interrupts */
-	INTR_ALL	= (INTR_RXQ | INTR_OFLD_RXQ),
 
 	/* adapter debug_flags */
 	DF_DUMP_MBOX		= (1 << 0),	/* Log all mbox cmd/rpl. */
@@ -349,7 +346,7 @@ enum {
 	/* iq flags */
 	IQ_ALLOCATED	= (1 << 0),	/* firmware resources allocated */
 	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
-	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
+					/* 1 << 2 Used to be IQ_INTR */
 	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */
 	IQ_ADJ_CREDIT	= (1 << 4),	/* hw is off by 1 credit for this iq */
 
@@ -955,6 +952,13 @@ struct adapter {
 
 /* One for firmware events */
 #define T4VF_EXTRA_INTR 1
+
+static inline int
+forwarding_intr_to_fwq(struct adapter *sc)
+{
+
+	return (sc->intr_count == 1);
+}
 
 static inline uint32_t
 t4_read_reg(struct adapter *sc, uint32_t reg)

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Fri Dec 22 19:10:19 2017	(r327092)
+++ head/sys/dev/cxgbe/t4_main.c	Fri Dec 22 19:10:19 2017	(r327093)
@@ -470,7 +470,6 @@ struct intrs_and_queues {
 	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
 	uint16_t num_vis;	/* number of VIs for each port */
 	uint16_t nirq;		/* Total # of vectors */
-	uint16_t intr_flags;	/* Interrupt flags for each port */
 	uint16_t ntxq;		/* # of NIC txq's for each port */
 	uint16_t nrxq;		/* # of NIC rxq's for each port */
 	uint16_t nofldtxq;	/* # of TOE txq's for each port */
@@ -1118,7 +1117,6 @@ t4_attach(device_t dev)
 			vi->first_txq = tqidx;
 			vi->tmr_idx = t4_tmr_idx;
 			vi->pktc_idx = t4_pktc_idx;
-			vi->flags |= iaq.intr_flags & INTR_RXQ;
 			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
 			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
 
@@ -1135,7 +1133,6 @@ t4_attach(device_t dev)
 			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
 			vi->first_ofld_rxq = ofld_rqidx;
 			vi->first_ofld_txq = ofld_tqidx;
-			vi->flags |= iaq.intr_flags & INTR_OFLD_RXQ;
 			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
 			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
 
@@ -2648,26 +2645,43 @@ fixup_devlog_params(struct adapter *sc)
 	return (rc);
 }
 
-static int
-cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
+static void
+update_nirq(struct intrs_and_queues *iaq, int nports)
 {
-	int rc, itype, navail, nrxq, nports, n;
-	int nofldrxq = 0;
+	int extra = T4_EXTRA_INTR;
 
-	nports = sc->params.nports;
+	iaq->nirq = extra;
+	iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
+	iaq->nirq += nports * (iaq->num_vis - 1) *
+	    max(iaq->nrxq_vi, iaq->nnmrxq_vi);
+	iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
+}
+
+/*
+ * Adjust requirements to fit the number of interrupts available.
+ */
+static void
+calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
+    int navail)
+{
+	int old_nirq;
+	const int nports = sc->params.nports;
+
 	MPASS(nports > 0);
+	MPASS(navail > 0);
 
 	bzero(iaq, sizeof(*iaq));
+	iaq->intr_type = itype;
 	iaq->num_vis = t4_num_vis;
 	iaq->ntxq = t4_ntxq;
 	iaq->ntxq_vi = t4_ntxq_vi;
-	iaq->nrxq = nrxq = t4_nrxq;
+	iaq->nrxq = t4_nrxq;
 	iaq->nrxq_vi = t4_nrxq_vi;
 #ifdef TCP_OFFLOAD
 	if (is_offload(sc)) {
 		iaq->nofldtxq = t4_nofldtxq;
 		iaq->nofldtxq_vi = t4_nofldtxq_vi;
-		iaq->nofldrxq = nofldrxq = t4_nofldrxq;
+		iaq->nofldrxq = t4_nofldrxq;
 		iaq->nofldrxq_vi = t4_nofldrxq_vi;
 	}
 #endif
@@ -2676,6 +2690,105 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs
 	iaq->nnmrxq_vi = t4_nnmrxq_vi;
 #endif
 
+	update_nirq(iaq, nports);
+	if (iaq->nirq <= navail &&
+	    (itype != INTR_MSI || powerof2(iaq->nirq))) {
+		/*
+		 * This is the normal case -- there are enough interrupts for
+		 * everything.
+		 */
+		goto done;
+	}
+
+	/*
+	 * If extra VIs have been configured try reducing their count and see if
+	 * that works.
+	 */
+	while (iaq->num_vis > 1) {
+		iaq->num_vis--;
+		update_nirq(iaq, nports);
+		if (iaq->nirq <= navail &&
+		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
+			device_printf(sc->dev, "virtual interfaces per port "
+			    "reduced to %d from %d.  nrxq=%u, nofldrxq=%u, "
+			    "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u.  "
+			    "itype %d, navail %u, nirq %d.\n",
+			    iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
+			    iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
+			    itype, navail, iaq->nirq);
+			goto done;
+		}
+	}
+
+	/*
+	 * Extra VIs will not be created.  Log a message if they were requested.
+	 */
+	MPASS(iaq->num_vis == 1);
+	iaq->ntxq_vi = iaq->nrxq_vi = 0;
+	iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
+	iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
+	if (iaq->num_vis != t4_num_vis) {
+		device_printf(sc->dev, "extra virtual interfaces disabled.  "
+		    "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
+		    "nnmrxq_vi=%u.  itype %d, navail %u, nirq %d.\n",
+		    iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
+		    iaq->nnmrxq_vi, itype, navail, iaq->nirq);
+	}
+
+	/*
+	 * Keep reducing the number of NIC rx queues to the next lower power of
+	 * 2 (for even RSS distribution) and halving the TOE rx queues and see
+	 * if that works.
+	 */
+	do {
+		if (iaq->nrxq > 1) {
+			do {
+				iaq->nrxq--;
+			} while (!powerof2(iaq->nrxq));
+		}
+		if (iaq->nofldrxq > 1)
+			iaq->nofldrxq >>= 1;
+
+		old_nirq = iaq->nirq;
+		update_nirq(iaq, nports);
+		if (iaq->nirq <= navail &&
+		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
+			device_printf(sc->dev, "running with reduced number of "
+			    "rx queues because of shortage of interrupts.  "
+			    "nrxq=%u, nofldrxq=%u.  "
+			    "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
+			    iaq->nofldrxq, itype, navail, iaq->nirq);
+			goto done;
+		}
+	} while (old_nirq != iaq->nirq);
+
+	/* One interrupt for everything.  Ugh. */
+	device_printf(sc->dev, "running with minimal number of queues.  "
+	    "itype %d, navail %u.\n", itype, navail);
+	iaq->nirq = 1;
+	MPASS(iaq->nrxq == 1);
+	iaq->ntxq = 1;
+	if (iaq->nofldrxq > 1)
+		iaq->nofldtxq = 1;
+done:
+	MPASS(iaq->num_vis > 0);
+	if (iaq->num_vis > 1) {
+		MPASS(iaq->nrxq_vi > 0);
+		MPASS(iaq->ntxq_vi > 0);
+	}
+	MPASS(iaq->nirq > 0);
+	MPASS(iaq->nrxq > 0);
+	MPASS(iaq->ntxq > 0);
+	if (itype == INTR_MSI) {
+		MPASS(powerof2(iaq->nirq));
+	}
+}
+
+static int
+cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
+{
+	int rc, itype, navail, nalloc;
+
 	for (itype = INTR_MSIX; itype; itype >>= 1) {
 
 		if ((itype & t4_intr_types) == 0)
@@ -2691,126 +2804,33 @@ restart:
 		if (navail == 0)
 			continue;
 
-		iaq->intr_type = itype;
-		iaq->intr_flags = 0;
-
-		/*
-		 * Best option: an interrupt vector for errors, one for the
-		 * firmware event queue, and one for every rxq (NIC and TOE) of
-		 * every VI.  The VIs that support netmap use the same
-		 * interrupts for the NIC rx queues and the netmap rx queues
-		 * because only one set of queues is active at a time.
-		 */
-		iaq->nirq = T4_EXTRA_INTR;
-		iaq->nirq += nports * (nrxq + nofldrxq);
-		iaq->nirq += nports * (iaq->num_vis - 1) *
-		    max(iaq->nrxq_vi, iaq->nnmrxq_vi);	/* See comment above. */
-		iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
-		if (iaq->nirq <= navail &&
-		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
-			iaq->intr_flags = INTR_ALL;
-			goto allocate;
-		}
-
-		/* Disable the VIs (and netmap) if there aren't enough intrs */
-		if (iaq->num_vis > 1) {
-			device_printf(sc->dev, "virtual interfaces disabled "
-			    "because num_vis=%u with current settings "
-			    "(nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
-			    "nnmrxq_vi=%u) would need %u interrupts but "
-			    "only %u are available.\n", iaq->num_vis, nrxq,
-			    nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
-			    iaq->nnmrxq_vi, iaq->nirq, navail);
-			iaq->num_vis = 1;
-			iaq->ntxq_vi = iaq->nrxq_vi = 0;
-			iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
-			iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
-			goto restart;
-		}
-
-		/*
-		 * Second best option: a vector for errors, one for the firmware
-		 * event queue, and vectors for either all the NIC rx queues or
-		 * all the TOE rx queues.  The queues that don't get vectors
-		 * will forward their interrupts to those that do.
-		 */
-		iaq->nirq = T4_EXTRA_INTR;
-		if (nrxq >= nofldrxq) {
-			iaq->intr_flags = INTR_RXQ;
-			iaq->nirq += nports * nrxq;
-		} else {
-			iaq->intr_flags = INTR_OFLD_RXQ;
-			iaq->nirq += nports * nofldrxq;
-		}
-		if (iaq->nirq <= navail &&
-		    (itype != INTR_MSI || powerof2(iaq->nirq)))
-			goto allocate;
-
-		/*
-		 * Next best option: an interrupt vector for errors, one for the
-		 * firmware event queue, and at least one per main-VI.  At this
-		 * point we know we'll have to downsize nrxq and/or nofldrxq to
-		 * fit what's available to us.
-		 */
-		iaq->nirq = T4_EXTRA_INTR;
-		iaq->nirq += nports;
-		if (iaq->nirq <= navail) {
-			int leftover = navail - iaq->nirq;
-			int target = max(nrxq, nofldrxq);
-
-			iaq->intr_flags = nrxq >= nofldrxq ?
-			    INTR_RXQ : INTR_OFLD_RXQ;
-
-			n = 1;
-			while (n < target && leftover >= nports) {
-				leftover -= nports;
-				iaq->nirq += nports;
-				n++;
-			}
-			iaq->nrxq = min(n, nrxq);
-#ifdef TCP_OFFLOAD
-			iaq->nofldrxq = min(n, nofldrxq);
-#endif
-
-			if (itype != INTR_MSI || powerof2(iaq->nirq))
-				goto allocate;
-		}
-
-		/*
-		 * Least desirable option: one interrupt vector for everything.
-		 */
-		iaq->nirq = iaq->nrxq = 1;
-		iaq->intr_flags = 0;
-#ifdef TCP_OFFLOAD
-		if (is_offload(sc))
-			iaq->nofldrxq = 1;
-#endif
-allocate:
-		navail = iaq->nirq;
+		calculate_iaq(sc, iaq, itype, navail);
+		nalloc = iaq->nirq;
 		rc = 0;
 		if (itype == INTR_MSIX)
-			rc = pci_alloc_msix(sc->dev, &navail);
+			rc = pci_alloc_msix(sc->dev, &nalloc);
 		else if (itype == INTR_MSI)
-			rc = pci_alloc_msi(sc->dev, &navail);
+			rc = pci_alloc_msi(sc->dev, &nalloc);
 
-		if (rc == 0) {
-			if (navail == iaq->nirq)
+		if (rc == 0 && nalloc > 0) {
+			if (nalloc == iaq->nirq)
 				return (0);
 
 			/*
 			 * Didn't get the number requested.  Use whatever number
-			 * the kernel is willing to allocate (it's in navail).
+			 * the kernel is willing to allocate.
 			 */
 			device_printf(sc->dev, "fewer vectors than requested, "
 			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
-			    itype, iaq->nirq, navail);
+			    itype, iaq->nirq, nalloc);
 			pci_release_msi(sc->dev);
+			navail = nalloc;
 			goto restart;
 		}
 
 		device_printf(sc->dev,
 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
-		    itype, rc, iaq->nirq, navail);
+		    itype, rc, iaq->nirq, nalloc);
 	}
 
 	device_printf(sc->dev,
@@ -4352,7 +4372,7 @@ t4_setup_intr_handlers(struct adapter *sc)
 	 */
 	irq = &sc->irq[0];
 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
-	if (sc->intr_count == 1)
+	if (forwarding_intr_to_fwq(sc))
 		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
 
 	/* Multiple interrupts. */
@@ -4387,8 +4407,6 @@ t4_setup_intr_handlers(struct adapter *sc)
 			if (vi->nnmrxq > 0) {
 				int n = max(vi->nrxq, vi->nnmrxq);
 
-				MPASS(vi->flags & INTR_RXQ);
-
 				rxq = &sge->rxq[vi->first_rxq];
 #ifdef DEV_NETMAP
 				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
@@ -4406,11 +4424,17 @@ t4_setup_intr_handlers(struct adapter *sc)
 					    t4_vi_intr, irq, s);
 					if (rc != 0)
 						return (rc);
+#ifdef RSS
+					if (q < vi->nrxq) {
+						bus_bind_intr(sc->dev, irq->res,
+						    rss_getcpu(q % nbuckets));
+					}
+#endif
 					irq++;
 					rid++;
 					vi->nintr++;
 				}
-			} else if (vi->flags & INTR_RXQ) {
+			} else {
 				for_each_rxq(vi, q, rxq) {
 					snprintf(s, sizeof(s), "%x%c%x", p,
 					    'a' + v, q);
@@ -4428,18 +4452,15 @@ t4_setup_intr_handlers(struct adapter *sc)
 				}
 			}
 #ifdef TCP_OFFLOAD
-			if (vi->flags & INTR_OFLD_RXQ) {
-				for_each_ofld_rxq(vi, q, ofld_rxq) {
-					snprintf(s, sizeof(s), "%x%c%x", p,
-					    'A' + v, q);
-					rc = t4_alloc_irq(sc, irq, rid,
-					    t4_intr, ofld_rxq, s);
-					if (rc != 0)
-						return (rc);
-					irq++;
-					rid++;
-					vi->nintr++;
-				}
+			for_each_ofld_rxq(vi, q, ofld_rxq) {
+				snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
+				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
+				    ofld_rxq, s);
+				if (rc != 0)
+					return (rc);
+				irq++;
+				rid++;
+				vi->nintr++;
 			}
 #endif
 		}

Modified: head/sys/dev/cxgbe/t4_netmap.c
==============================================================================
--- head/sys/dev/cxgbe/t4_netmap.c	Fri Dec 22 19:10:19 2017	(r327092)
+++ head/sys/dev/cxgbe/t4_netmap.c	Fri Dec 22 19:10:19 2017	(r327093)
@@ -108,16 +108,10 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq
 	    V_FW_IQ_CMD_VFN(0));
 	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
 	    FW_LEN16(c));
-	if (vi->flags & INTR_RXQ) {
-		KASSERT(nm_rxq->intr_idx < sc->intr_count,
-		    ("%s: invalid direct intr_idx %d", __func__,
-		    nm_rxq->intr_idx));
-		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
-	} else {
-		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
-		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
-		    F_FW_IQ_CMD_IQANDST;
-	}
+	MPASS(!forwarding_intr_to_fwq(sc));
+	KASSERT(nm_rxq->intr_idx < sc->intr_count,
+	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
+	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
 	c.type_to_iqandstindex = htobe32(v |
 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
 	    V_FW_IQ_CMD_VIID(vi->viid) |

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c	Fri Dec 22 19:10:19 2017	(r327092)
+++ head/sys/dev/cxgbe/t4_sge.c	Fri Dec 22 19:10:19 2017	(r327093)
@@ -953,70 +953,6 @@ t4_teardown_adapter_queues(struct adapter *sc)
 	return (0);
 }
 
-static inline int
-first_vector(struct vi_info *vi)
-{
-	struct adapter *sc = vi->pi->adapter;
-
-	if (sc->intr_count == 1)
-		return (0);
-
-	return (vi->first_intr);
-}
-
-/*
- * Given an arbitrary "index," come up with an iq that can be used by other
- * queues (of this VI) for interrupt forwarding, SGE egress updates, etc.
- * The iq returned is guaranteed to be something that takes direct interrupts.
- */
-static struct sge_iq *
-vi_intr_iq(struct vi_info *vi, int idx)
-{
-	struct adapter *sc = vi->pi->adapter;
-	struct sge *s = &sc->sge;
-	struct sge_iq *iq = NULL;
-	int nintr, i;
-
-	if (sc->intr_count == 1)
-		return (&sc->sge.fwq);
-
-	nintr = vi->nintr;
-#ifdef DEV_NETMAP
-	/* Do not consider any netmap-only interrupts */
-	if (vi->flags & INTR_RXQ && vi->nnmrxq > vi->nrxq)
-		nintr -= vi->nnmrxq - vi->nrxq;
-#endif
-	KASSERT(nintr != 0,
-	    ("%s: vi %p has no exclusive interrupts, total interrupts = %d",
-	    __func__, vi, sc->intr_count));
-	i = idx % nintr;
-
-	if (vi->flags & INTR_RXQ) {
-	       	if (i < vi->nrxq) {
-			iq = &s->rxq[vi->first_rxq + i].iq;
-			goto done;
-		}
-		i -= vi->nrxq;
-	}
-#ifdef TCP_OFFLOAD
-	if (vi->flags & INTR_OFLD_RXQ) {
-	       	if (i < vi->nofldrxq) {
-			iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq;
-			goto done;
-		}
-		i -= vi->nofldrxq;
-	}
-#endif
-	panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
-	    vi, vi->flags & INTR_ALL, idx, nintr);
-done:
-	MPASS(iq != NULL);
-	KASSERT(iq->flags & IQ_INTR,
-	    ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi,
-	    vi->flags & INTR_ALL, idx));
-	return (iq);
-}
-
 /* Maximum payload that can be delivered with a single iq descriptor */
 static inline int
 mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
@@ -1042,7 +978,7 @@ mtu_to_max_payload(struct adapter *sc, int mtu, const 
 int
 t4_setup_vi_queues(struct vi_info *vi)
 {
-	int rc = 0, i, j, intr_idx, iqid;
+	int rc = 0, i, intr_idx, iqidx;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
 	struct sge_wrq *ctrlq;
@@ -1064,14 +1000,14 @@ t4_setup_vi_queues(struct vi_info *vi)
 	int maxp, mtu = ifp->if_mtu;
 
 	/* Interrupt vector to start from (when using multiple vectors) */
-	intr_idx = first_vector(vi);
+	intr_idx = vi->first_intr;
 
 #ifdef DEV_NETMAP
 	saved_idx = intr_idx;
 	if (ifp->if_capabilities & IFCAP_NETMAP) {
 
 		/* netmap is supported with direct interrupts only. */
-		MPASS(vi->flags & INTR_RXQ);
+		MPASS(!forwarding_intr_to_fwq(sc));
 
 		/*
 		 * We don't have buffers to back the netmap rx queues
@@ -1090,8 +1026,8 @@ t4_setup_vi_queues(struct vi_info *vi)
 		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
 		    CTLFLAG_RD, NULL, "tx queues");
 		for_each_nm_txq(vi, i, nm_txq) {
-			iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
-			rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
+			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
+			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
 			if (rc != 0)
 				goto done;
 		}
@@ -1102,15 +1038,12 @@ t4_setup_vi_queues(struct vi_info *vi)
 #endif
 
 	/*
-	 * First pass over all NIC and TOE rx queues:
-	 * a) initialize iq and fl
-	 * b) allocate queue iff it will take direct interrupts.
+	 * Allocate rx queues first because a default iqid is required when
+	 * creating a tx queue.
 	 */
 	maxp = mtu_to_max_payload(sc, mtu, 0);
-	if (vi->flags & INTR_RXQ) {
-		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
-		    CTLFLAG_RD, NULL, "rx queues");
-	}
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
+	    CTLFLAG_RD, NULL, "rx queues");
 	for_each_rxq(vi, i, rxq) {
 
 		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
@@ -1119,13 +1052,11 @@ t4_setup_vi_queues(struct vi_info *vi)
 		    device_get_nameunit(vi->dev), i);
 		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
 
-		if (vi->flags & INTR_RXQ) {
-			rxq->iq.flags |= IQ_INTR;
-			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
-			if (rc != 0)
-				goto done;
-			intr_idx++;
-		}
+		rc = alloc_rxq(vi, rxq,
+		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
+		if (rc != 0)
+			goto done;
+		intr_idx++;
 	}
 #ifdef DEV_NETMAP
 	if (ifp->if_capabilities & IFCAP_NETMAP)
@@ -1133,11 +1064,8 @@ t4_setup_vi_queues(struct vi_info *vi)
 #endif
 #ifdef TCP_OFFLOAD
 	maxp = mtu_to_max_payload(sc, mtu, 1);
-	if (vi->flags & INTR_OFLD_RXQ) {
-		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
-		    CTLFLAG_RD, NULL,
-		    "rx queues for offloaded TCP connections");
-	}
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
+	    CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections");
 	for_each_ofld_rxq(vi, i, ofld_rxq) {
 
 		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
@@ -1147,70 +1075,29 @@ t4_setup_vi_queues(struct vi_info *vi)
 		    device_get_nameunit(vi->dev), i);
 		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
 
-		if (vi->flags & INTR_OFLD_RXQ) {
-			ofld_rxq->iq.flags |= IQ_INTR;
-			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
-			if (rc != 0)
-				goto done;
-			intr_idx++;
-		}
+		rc = alloc_ofld_rxq(vi, ofld_rxq,
+		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
+		if (rc != 0)
+			goto done;
+		intr_idx++;
 	}
 #endif
 
 	/*
-	 * Second pass over all NIC and TOE rx queues.  The queues forwarding
-	 * their interrupts are allocated now.
+	 * Now the tx queues.
 	 */
-	j = 0;
-	if (!(vi->flags & INTR_RXQ)) {
-		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
-		    CTLFLAG_RD, NULL, "rx queues");
-		for_each_rxq(vi, i, rxq) {
-			MPASS(!(rxq->iq.flags & IQ_INTR));
-
-			intr_idx = vi_intr_iq(vi, j)->abs_id;
-
-			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
-			if (rc != 0)
-				goto done;
-			j++;
-		}
-	}
-#ifdef TCP_OFFLOAD
-	if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) {
-		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
-		    CTLFLAG_RD, NULL,
-		    "rx queues for offloaded TCP connections");
-		for_each_ofld_rxq(vi, i, ofld_rxq) {
-			MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
-
-			intr_idx = vi_intr_iq(vi, j)->abs_id;
-
-			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
-			if (rc != 0)
-				goto done;
-			j++;
-		}
-	}
-#endif
-
-	/*
-	 * Now the tx queues.  Only one pass needed.
-	 */
 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
 	    NULL, "tx queues");
-	j = 0;
 	for_each_txq(vi, i, txq) {
-		iqid = vi_intr_iq(vi, j)->cntxt_id;
+		iqidx = vi->first_rxq + (i % vi->nrxq);
 		snprintf(name, sizeof(name), "%s txq%d",
 		    device_get_nameunit(vi->dev), i);
-		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
-		    name);
+		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
+		    sc->sge.rxq[iqidx].iq.cntxt_id, name);
 
 		rc = alloc_txq(vi, txq, i, oid);
 		if (rc != 0)
 			goto done;
-		j++;
 	}
 #ifdef TCP_OFFLOAD
 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
@@ -1218,11 +1105,11 @@ t4_setup_vi_queues(struct vi_info *vi)
 	for_each_ofld_txq(vi, i, ofld_txq) {
 		struct sysctl_oid *oid2;
 
-		iqid = vi_intr_iq(vi, j)->cntxt_id;
+		iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
 		snprintf(name, sizeof(name), "%s ofld_txq%d",
 		    device_get_nameunit(vi->dev), i);
 		init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
-		    iqid, name);
+		    sc->sge.ofld_rxq[iqidx].iq.cntxt_id, name);
 
 		snprintf(name, sizeof(name), "%d", i);
 		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
@@ -1231,7 +1118,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
 		if (rc != 0)
 			goto done;
-		j++;
 	}
 #endif
 
@@ -1243,10 +1129,9 @@ t4_setup_vi_queues(struct vi_info *vi)
 	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
 	    NULL, "ctrl queue");
 	ctrlq = &sc->sge.ctrlq[pi->port_id];
-	iqid = vi_intr_iq(vi, 0)->cntxt_id;
 	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
-	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid,
-	    name);
+	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
+	    sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
 	rc = alloc_wrq(sc, vi, ctrlq, oid);
 
 done:
@@ -1312,36 +1197,18 @@ t4_teardown_vi_queues(struct vi_info *vi)
 #endif
 
 	/*
-	 * Then take down the rx queues that forward their interrupts, as they
-	 * reference other rx queues.
+	 * Then take down the rx queues.
 	 */
 
 	for_each_rxq(vi, i, rxq) {
-		if ((rxq->iq.flags & IQ_INTR) == 0)
-			free_rxq(vi, rxq);
+		free_rxq(vi, rxq);
 	}
 #ifdef TCP_OFFLOAD
 	for_each_ofld_rxq(vi, i, ofld_rxq) {
-		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
-			free_ofld_rxq(vi, ofld_rxq);
+		free_ofld_rxq(vi, ofld_rxq);
 	}
 #endif
 
-	/*
-	 * Then take down the rx queues that take direct interrupts.
-	 */
-
-	for_each_rxq(vi, i, rxq) {
-		if (rxq->iq.flags & IQ_INTR)
-			free_rxq(vi, rxq);
-	}
-#ifdef TCP_OFFLOAD
-	for_each_ofld_rxq(vi, i, ofld_rxq) {
-		if (ofld_rxq->iq.flags & IQ_INTR)
-			free_ofld_rxq(vi, ofld_rxq);
-	}
-#endif
-
 	return (0);
 }
 
@@ -2715,9 +2582,9 @@ free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_d
  * Returns errno on failure.  Resources allocated up to that point may still be
  * allocated.  Caller is responsible for cleanup in case this function fails.
  *
- * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
- * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
- * the abs_id of the ingress queue to which its interrupts should be forwarded.
+ * If the ingress queue will take interrupts directly then the intr_idx
+ * specifies the vector, starting from 0.  -1 means the interrupts for this
+ * queue should be forwarded to the fwq.
  */
 static int
 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
@@ -2749,12 +2616,15 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, str
 	if (iq == &sc->sge.fwq)
 		v |= F_FW_IQ_CMD_IQASYNCH;
 
-	if (iq->flags & IQ_INTR) {
+	if (intr_idx < 0) {
+		/* Forwarded interrupts, all headed to fwq */
+		v |= F_FW_IQ_CMD_IQANDST;
+		v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
+	} else {
 		KASSERT(intr_idx < sc->intr_count,
 		    ("%s: invalid direct intr_idx %d", __func__, intr_idx));
-	} else
-		v |= F_FW_IQ_CMD_IQANDST;
-	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
+		v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
+	}
 
 	c.type_to_iqandstindex = htobe32(v |
 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
@@ -3004,7 +2874,6 @@ alloc_fwq(struct adapter *sc)
 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
 
 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
-	fwq->flags |= IQ_INTR;	/* always */
 	if (sc->flags & IS_VF)
 		intr_idx = 0;
 	else {

Modified: head/sys/dev/cxgbe/t4_vf.c
==============================================================================
--- head/sys/dev/cxgbe/t4_vf.c	Fri Dec 22 19:10:19 2017	(r327092)
+++ head/sys/dev/cxgbe/t4_vf.c	Fri Dec 22 19:10:19 2017	(r327093)
@@ -62,7 +62,6 @@ __FBSDID("$FreeBSD$");
 struct intrs_and_queues {
 	uint16_t intr_type;	/* MSI, or MSI-X */
 	uint16_t nirq;		/* Total # of vectors */
-	uint16_t intr_flags;	/* Interrupt flags for each port */
 	uint16_t ntxq;		/* # of NIC txq's for each port */
 	uint16_t nrxq;		/* # of NIC rxq's for each port */
 };
@@ -330,7 +329,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs
 			continue;
 
 		iaq->intr_type = itype;
-		iaq->intr_flags = 0;
 
 		/*
 		 * XXX: The Linux driver reserves an Ingress Queue for
@@ -438,7 +436,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs
 				return (rc);
 			}
 			if (navail == iaq->nirq) {
-				iaq->intr_flags = INTR_RXQ;
 				return (0);
 			}
 			pci_release_msi(sc->dev);
@@ -455,7 +452,6 @@ cfg_itype_and_nqueues(struct adapter *sc, struct intrs
 			device_printf(sc->dev,
 		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
 			    itype, rc, iaq->nirq, navail);
-		iaq->intr_flags = 0;
 		return (rc);
 	}
 
@@ -702,7 +698,6 @@ t4vf_attach(device_t dev)
 			vi->first_txq = tqidx;
 			vi->tmr_idx = t4_tmr_idx;
 			vi->pktc_idx = t4_pktc_idx;
-			vi->flags |= iaq.intr_flags & INTR_RXQ;
 			vi->nrxq = j == 0 ? iaq.nrxq: 1;
 			vi->ntxq = j == 0 ? iaq.ntxq: 1;
 


