From: Navdeep Parhar <np@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Date: Mon, 29 Apr 2019 04:31:27 +0000 (UTC)
Subject: svn commit: r346875 - stable/11/sys/dev/cxgbe
Message-Id: <201904290431.x3T4VRhp099286@repo.freebsd.org>

Author: np
Date: Mon Apr 29 04:31:27 2019
New Revision: 346875

URL: https://svnweb.freebsd.org/changeset/base/346875

Log:
  MFC r337609:

  cxgbe(4): Create two variants of service_iq, one for queues with
  freelists and one for those without.

  MFH:		3 weeks
  Sponsored by:	Chelsio Communications
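In rough outline, the change binds each ingress queue to a service routine
specialized for whether the queue owns a freelist, instead of testing
IQ_HAS_FL inside one common loop.  Below is a minimal userland sketch of
that control flow; struct iq, the pending/fl_needed counters, and both
function bodies are simplified stand-ins for illustration, not the driver's
real definitions.

	#include <assert.h>
	#include <stdio.h>

	#define	IQ_HAS_FL	0x1

	struct iq {
		int flags;
		int pending;	/* pretend: responses waiting in the ring */
		int fl_needed;	/* pretend: freelist buffers to replenish */
	};

	/* Services an iq-only queue, e.g. the firmware event queue. */
	static int
	service_iq(struct iq *q)
	{
		int n = 0;

		assert((q->flags & IQ_HAS_FL) == 0);
		while (q->pending > 0) {
			q->pending--;	/* handle one response */
			n++;
		}
		return (n);
	}

	/* Services an iq+fl queue; also keeps the freelist topped up. */
	static int
	service_iq_fl(struct iq *q)
	{
		int n = 0;

		assert(q->flags & IQ_HAS_FL);
		while (q->pending > 0) {
			q->pending--;	/* handle one rx response... */
			q->fl_needed++;	/* ...which consumed an fl buffer */
			n++;
		}
		q->fl_needed = 0;	/* stand-in for refill_fl() */
		return (n);
	}

	int
	main(void)
	{
		struct iq fwq = { 0, 4, 0 };
		struct iq rxq = { IQ_HAS_FL, 8, 0 };

		/* Handlers bind the right routine up front. */
		printf("fwq: %d descs\n", service_iq(&fwq));
		printf("rxq: %d descs\n", service_iq_fl(&rxq));
		return (0);
	}

The payoff is that the per-response freelist bookkeeping and LRO branches
exist only in the variant that can actually need them.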
Modified:
  stable/11/sys/dev/cxgbe/adapter.h
  stable/11/sys/dev/cxgbe/t4_main.c
  stable/11/sys/dev/cxgbe/t4_netmap.c
  stable/11/sys/dev/cxgbe/t4_sge.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/dev/cxgbe/adapter.h
==============================================================================
--- stable/11/sys/dev/cxgbe/adapter.h	Mon Apr 29 03:52:03 2019	(r346874)
+++ stable/11/sys/dev/cxgbe/adapter.h	Mon Apr 29 04:31:27 2019	(r346875)
@@ -670,6 +670,7 @@ struct sge_wrq {
 
 struct sge_nm_rxq {
+	volatile int nm_state;	/* NM_OFF, NM_ON, or NM_BUSY */
 	struct vi_info *vi;
 
 	struct iq_desc *iq_desc;
@@ -795,7 +796,6 @@ struct adapter {
 struct irq {
 	struct resource *res;
 	int rid;
-	volatile int nm_state;	/* NM_OFF, NM_ON, or NM_BUSY */
 	void *tag;
 	struct sge_rxq *rxq;
 	struct sge_nm_rxq *nm_rxq;
@@ -1139,9 +1139,10 @@ void release_tid(struct adapter *, int, struct sge_wrq
 
 #ifdef DEV_NETMAP
 /* t4_netmap.c */
+struct sge_nm_rxq;
 void cxgbe_nm_attach(struct vi_info *);
 void cxgbe_nm_detach(struct vi_info *);
-void t4_nm_intr(void *);
+void service_nm_rxq(struct sge_nm_rxq *);
 #endif
 
 /* t4_sge.c */
@@ -1160,7 +1161,10 @@ int t4_setup_vi_queues(struct vi_info *);
 int t4_teardown_vi_queues(struct vi_info *);
 void t4_intr_all(void *);
 void t4_intr(void *);
+#ifdef DEV_NETMAP
+void t4_nm_intr(void *);
 void t4_vi_intr(void *);
+#endif
 void t4_intr_err(void *);
 void t4_intr_evt(void *);
 void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);

Modified: stable/11/sys/dev/cxgbe/t4_main.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_main.c	Mon Apr 29 03:52:03 2019	(r346874)
+++ stable/11/sys/dev/cxgbe/t4_main.c	Mon Apr 29 04:31:27 2019	(r346875)
@@ -761,8 +761,8 @@ struct {
 
 #ifdef TCP_OFFLOAD
 /*
- * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
- * exactly the same for both rxq and ofld_rxq.
+ * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
+ * be exactly the same for both rxq and ofld_rxq.
  */
 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
@@ -5004,9 +5004,26 @@ t4_setup_intr_handlers(struct adapter *sc)
 #ifdef DEV_NETMAP
 				if (q < vi->nnmrxq)
 					irq->nm_rxq = nm_rxq++;
+
+				if (irq->nm_rxq != NULL &&
+				    irq->rxq == NULL) {
+					/* Netmap rx only */
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_nm_intr, irq->nm_rxq, s);
+				}
+				if (irq->nm_rxq != NULL &&
+				    irq->rxq != NULL) {
+					/* NIC and Netmap rx */
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_vi_intr, irq, s);
+				}
 #endif
-				rc = t4_alloc_irq(sc, irq, rid,
-				    t4_vi_intr, irq, s);
+				if (irq->rxq != NULL &&
+				    irq->nm_rxq == NULL) {
+					/* NIC rx only */
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_intr, irq->rxq, s);
+				}
 				if (rc != 0)
 					return (rc);
 #ifdef RSS

Modified: stable/11/sys/dev/cxgbe/t4_netmap.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_netmap.c	Mon Apr 29 03:52:03 2019	(r346874)
+++ stable/11/sys/dev/cxgbe/t4_netmap.c	Mon Apr 29 04:31:27 2019	(r346875)
@@ -339,8 +339,6 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi
 	nm_set_native_flags(na);
 
 	for_each_nm_rxq(vi, i, nm_rxq) {
-		struct irq *irq = &sc->irq[vi->first_intr + i];
-
 		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
 		nm_rxq->fl_hwidx = hwidx;
 		slot = netmap_reset(na, NR_RX, i, 0);
@@ -363,7 +361,7 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi
 		t4_write_reg(sc, sc->sge_kdoorbell_reg,
 		    nm_rxq->fl_db_val | V_PIDX(j));
 
-		atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
+		atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
 	}
 
 	for_each_nm_txq(vi, i, nm_txq) {
@@ -424,9 +422,7 @@ cxgbe_netmap_off(struct adapter *sc, struct vi_info *v
 		free_nm_txq_hwq(vi, nm_txq);
 	}
 	for_each_nm_rxq(vi, i, nm_rxq) {
-		struct irq *irq = &sc->irq[vi->first_intr + i];
-
-		while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
+		while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
 			pause("nmst", 1);
 
 		free_nm_rxq_hwq(vi, nm_rxq);
@@ -902,9 +898,8 @@ handle_nm_sge_egr_update(struct adapter *sc, struct if
 }
 
 void
-t4_nm_intr(void *arg)
+service_nm_rxq(struct sge_nm_rxq *nm_rxq)
 {
-	struct sge_nm_rxq *nm_rxq = arg;
 	struct vi_info *vi = nm_rxq->vi;
 	struct adapter *sc = vi->pi->adapter;
 	struct ifnet *ifp = vi->ifp;

Modified: stable/11/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- stable/11/sys/dev/cxgbe/t4_sge.c	Mon Apr 29 03:52:03 2019	(r346874)
+++ stable/11/sys/dev/cxgbe/t4_sge.c	Mon Apr 29 04:31:27 2019	(r346875)
@@ -196,6 +196,7 @@ struct sgl {
 };
 
 static int service_iq(struct sge_iq *, int);
+static int service_iq_fl(struct sge_iq *, int);
 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
 static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
@@ -1297,8 +1298,12 @@ t4_teardown_vi_queues(struct vi_info *vi)
 }
 
 /*
- * Deals with errors and the firmware event queue.  All data rx queues forward
- * their interrupt to the firmware event queue.
+ * Interrupt handler when the driver is using only 1 interrupt.  This is a very
+ * unusual scenario.
+ *
+ * a) Deals with errors, if any.
+ * b) Services firmware event queue, which is taking interrupts for all other
+ *    queues.
  */
 void
 t4_intr_all(void *arg)
@@ -1306,14 +1311,16 @@ t4_intr_all(void *arg)
 	struct adapter *sc = arg;
 	struct sge_iq *fwq = &sc->sge.fwq;
 
+	MPASS(sc->intr_count == 1);
+
 	t4_intr_err(arg);
-	if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) {
-		service_iq(fwq, 0);
-		atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE);
-	}
+	t4_intr_evt(fwq);
 }
 
-/* Deals with error interrupts */
+/*
+ * Interrupt handler for errors (installed directly when multiple interrupts are
+ * being used, or called by t4_intr_all).
+ */
 void
 t4_intr_err(void *arg)
 {
@@ -1323,6 +1330,10 @@ t4_intr_err(void *arg)
 	t4_slow_intr_handler(sc);
 }
 
+/*
+ * Interrupt handler for iq-only queues.  The firmware event queue is the only
+ * such queue right now.
+ */
 void
 t4_intr_evt(void *arg)
 {
@@ -1334,90 +1345,74 @@ t4_intr_evt(void *arg)
 	}
 }
 
+/*
+ * Interrupt handler for iq+fl queues.
+ */
 void
 t4_intr(void *arg)
 {
 	struct sge_iq *iq = arg;
 
 	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
-		service_iq(iq, 0);
+		service_iq_fl(iq, 0);
 		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
 	}
 }
 
+#ifdef DEV_NETMAP
+/*
+ * Interrupt handler for netmap rx queues.
+ */
 void
-t4_vi_intr(void *arg)
+t4_nm_intr(void *arg)
 {
-	struct irq *irq = arg;
+	struct sge_nm_rxq *nm_rxq = arg;
 
-#ifdef DEV_NETMAP
-	if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
-		t4_nm_intr(irq->nm_rxq);
-		atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
+	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
+		service_nm_rxq(nm_rxq);
+		atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
 	}
-#endif
-	if (irq->rxq != NULL)
-		t4_intr(irq->rxq);
 }
 
-static inline int
-sort_before_lro(struct lro_ctrl *lro)
+/*
+ * Interrupt handler for vectors shared between NIC and netmap rx queues.
+ */
+void
+t4_vi_intr(void *arg)
 {
+	struct irq *irq = arg;
 
-	return (lro->lro_mbuf_max != 0);
+	MPASS(irq->nm_rxq != NULL);
+	t4_nm_intr(irq->nm_rxq);
+
+	MPASS(irq->rxq != NULL);
+	t4_intr(irq->rxq);
 }
+#endif
 
 /*
- * Deals with anything and everything on the given ingress queue.
+ * Deals with interrupts on an iq-only (no freelist) queue.
  */
 static int
 service_iq(struct sge_iq *iq, int budget)
 {
 	struct sge_iq *q;
-	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
-	struct sge_fl *fl;			/* Use iff IQ_HAS_FL */
 	struct adapter *sc = iq->adapter;
 	struct iq_desc *d = &iq->desc[iq->cidx];
 	int ndescs = 0, limit;
-	int rsp_type, refill;
+	int rsp_type;
 	uint32_t lq;
-	uint16_t fl_hw_cidx;
-	struct mbuf *m0;
 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
-#if defined(INET) || defined(INET6)
-	const struct timeval lro_timeout = {0, sc->lro_timeout};
-	struct lro_ctrl *lro = &rxq->lro;
-#endif
 
 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
+	KASSERT((iq->flags & IQ_HAS_FL) == 0,
+	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
+	    iq->flags));
+	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
+	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);
 
 	limit = budget ? budget : iq->qsize / 16;
-	if (iq->flags & IQ_HAS_FL) {
-		fl = &rxq->fl;
-		fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
-	} else {
-		fl = NULL;
-		fl_hw_cidx = 0;			/* to silence gcc warning */
-	}
-
-#if defined(INET) || defined(INET6)
-	if (iq->flags & IQ_ADJ_CREDIT) {
-		MPASS(sort_before_lro(lro));
-		iq->flags &= ~IQ_ADJ_CREDIT;
-		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
-			tcp_lro_flush_all(lro);
-			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
-			    V_INGRESSQID((u32)iq->cntxt_id) |
-			    V_SEINTARM(iq->intr_params));
-			return (0);
-		}
-		ndescs = 1;
-	}
-#else
-	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
-#endif
-
 	/*
 	 * We always come back and check the descriptor ring for new indirect
 	 * interrupts and other responses after running a single handler.
@@ -1427,74 +1422,40 @@ service_iq(struct sge_iq *iq, int budget)
 
 		rmb();
 
-		refill = 0;
-		m0 = NULL;
 		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
 		lq = be32toh(d->rsp.pldbuflen_qid);
 
 		switch (rsp_type) {
 		case X_RSPD_TYPE_FLBUF:
+			panic("%s: data for an iq (%p) with no freelist",
+			    __func__, iq);
 
-			KASSERT(iq->flags & IQ_HAS_FL,
-			    ("%s: data for an iq (%p) with no freelist",
-			    __func__, iq));
+			/* NOTREACHED */
 
-			m0 = get_fl_payload(sc, fl, lq);
-			if (__predict_false(m0 == NULL))
-				goto process_iql;
-			refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;
-#ifdef T4_PKT_TIMESTAMP
-			/*
-			 * 60 bit timestamp for the payload is
-			 * *(uint64_t *)m0->m_pktdat.  Note that it is
-			 * in the leading free-space in the mbuf.  The
-			 * kernel can clobber it during a pullup,
-			 * m_copymdata, etc.  You need to make sure that
-			 * the mbuf reaches you unmolested if you care
-			 * about the timestamp.
-			 */
-			*(uint64_t *)m0->m_pktdat =
-			    be64toh(ctrl->u.last_flit) &
-			    0xfffffffffffffff;
-#endif
-
-			/* fall through */
-
 		case X_RSPD_TYPE_CPL:
 			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
 			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
-			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
+			t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
 			break;
 
 		case X_RSPD_TYPE_INTR:
-			/*
-			 * Interrupts should be forwarded only to queues
-			 * that are not forwarding their interrupts.
-			 * This means service_iq can recurse but only 1
-			 * level deep.
-			 */
-			KASSERT(budget == 0,
-			    ("%s: budget %u, rsp_type %u", __func__,
-			    budget, rsp_type));
-
 			/*
 			 * There are 1K interrupt-capable queues (qids 0
 			 * through 1023).  A response type indicating a
 			 * forwarded interrupt with a qid >= 1K is an
 			 * iWARP async notification.
 			 */
-			if (lq >= 1024) {
-				t4_an_handler(iq, &d->rsp);
-				break;
-			}
+			if (__predict_true(lq >= 1024)) {
+				t4_an_handler(iq, &d->rsp);
+				break;
+			}
 
 			q = sc->sge.iqmap[lq - sc->sge.iq_start -
 			    sc->sge.iq_base];
 			if (atomic_cmpset_int(&q->state, IQS_IDLE,
 			    IQS_BUSY)) {
-				if (service_iq(q, q->qsize / 16) == 0) {
+				if (service_iq_fl(q, q->qsize / 16) == 0) {
 					atomic_cmpset_int(&q->state,
 					    IQS_BUSY, IQS_IDLE);
 				} else {
@@ -1527,33 +1488,12 @@ service_iq(struct sge_iq *iq, int budget)
 			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
 			ndescs = 0;
 
-#if defined(INET) || defined(INET6)
-			if (iq->flags & IQ_LRO_ENABLED &&
-			    !sort_before_lro(lro) &&
-			    sc->lro_timeout != 0) {
-				tcp_lro_flush_inactive(lro,
-				    &lro_timeout);
-			}
-#endif
-
 			if (budget) {
-				if (iq->flags & IQ_HAS_FL) {
-					FL_LOCK(fl);
-					refill_fl(sc, fl, 32);
-					FL_UNLOCK(fl);
-				}
 				return (EINPROGRESS);
 			}
 		}
-		if (refill) {
-			FL_LOCK(fl);
-			refill_fl(sc, fl, 32);
-			FL_UNLOCK(fl);
-			fl_hw_cidx = fl->hw_cidx;
-		}
 	}
 
-process_iql:
 	if (STAILQ_EMPTY(&iql))
 		break;
 
@@ -1563,13 +1503,168 @@ process_iql:
 		 */
 		q = STAILQ_FIRST(&iql);
 		STAILQ_REMOVE_HEAD(&iql, link);
-		if (service_iq(q, q->qsize / 8) == 0)
+		if (service_iq_fl(q, q->qsize / 8) == 0)
 			atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
 		else
 			STAILQ_INSERT_TAIL(&iql, q, link);
 	}
 
+	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
+	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
+
+	return (0);
+}
+
+static inline int
+sort_before_lro(struct lro_ctrl *lro)
+{
+
+	return (lro->lro_mbuf_max != 0);
+}
+
+/*
+ * Deals with interrupts on an iq+fl queue.
+ */
+static int
+service_iq_fl(struct sge_iq *iq, int budget)
+{
+	struct sge_rxq *rxq = iq_to_rxq(iq);
+	struct sge_fl *fl;
+	struct adapter *sc = iq->adapter;
+	struct iq_desc *d = &iq->desc[iq->cidx];
+	int ndescs = 0, limit;
+	int rsp_type, refill, starved;
+	uint32_t lq;
+	uint16_t fl_hw_cidx;
+	struct mbuf *m0;
+#if defined(INET) || defined(INET6)
+	const struct timeval lro_timeout = {0, sc->lro_timeout};
+	struct lro_ctrl *lro = &rxq->lro;
+#endif
+
+	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
+	MPASS(iq->flags & IQ_HAS_FL);
+
+	limit = budget ? budget : iq->qsize / 16;
+	fl = &rxq->fl;
+	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
+
+#if defined(INET) || defined(INET6)
+	if (iq->flags & IQ_ADJ_CREDIT) {
+		MPASS(sort_before_lro(lro));
+		iq->flags &= ~IQ_ADJ_CREDIT;
+		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
+			tcp_lro_flush_all(lro);
+			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
+			    V_INGRESSQID((u32)iq->cntxt_id) |
+			    V_SEINTARM(iq->intr_params));
+			return (0);
+		}
+		ndescs = 1;
+	}
+#else
+	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
+#endif
+
+	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
+
+		rmb();
+
+		refill = 0;
+		m0 = NULL;
+		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
+		lq = be32toh(d->rsp.pldbuflen_qid);
+
+		switch (rsp_type) {
+		case X_RSPD_TYPE_FLBUF:
+
+			m0 = get_fl_payload(sc, fl, lq);
+			if (__predict_false(m0 == NULL))
+				goto out;
+			refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;
+#ifdef T4_PKT_TIMESTAMP
+			/*
+			 * 60 bit timestamp for the payload is
+			 * *(uint64_t *)m0->m_pktdat.  Note that it is
+			 * in the leading free-space in the mbuf.  The
+			 * kernel can clobber it during a pullup,
+			 * m_copymdata, etc.  You need to make sure that
+			 * the mbuf reaches you unmolested if you care
+			 * about the timestamp.
+			 */
+			*(uint64_t *)m0->m_pktdat =
+			    be64toh(ctrl->u.last_flit) & 0xfffffffffffffff;
+#endif
+
+			/* fall through */
+
+		case X_RSPD_TYPE_CPL:
+			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
+			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
+			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
+			break;
+
+		case X_RSPD_TYPE_INTR:
+
+			/*
+			 * There are 1K interrupt-capable queues (qids 0
+			 * through 1023).  A response type indicating a
+			 * forwarded interrupt with a qid >= 1K is an
+			 * iWARP async notification.  That is the only
+			 * acceptable indirect interrupt on this queue.
+			 */
+			if (__predict_false(lq < 1024)) {
+				panic("%s: indirect interrupt on iq_fl %p "
+				    "with qid %u", __func__, iq, lq);
+			}
+
+			t4_an_handler(iq, &d->rsp);
+			break;
+
+		default:
+			KASSERT(0, ("%s: illegal response type %d on iq %p",
+			    __func__, rsp_type, iq));
+			log(LOG_ERR, "%s: illegal response type %d on iq %p",
+			    device_get_nameunit(sc->dev), rsp_type, iq);
+			break;
+		}
+
+		d++;
+		if (__predict_false(++iq->cidx == iq->sidx)) {
+			iq->cidx = 0;
+			iq->gen ^= F_RSPD_GEN;
+			d = &iq->desc[0];
+		}
+		if (__predict_false(++ndescs == limit)) {
+			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
+			    V_INGRESSQID(iq->cntxt_id) |
+			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
+			ndescs = 0;
+
+#if defined(INET) || defined(INET6)
+			if (iq->flags & IQ_LRO_ENABLED &&
+			    !sort_before_lro(lro) &&
+			    sc->lro_timeout != 0) {
+				tcp_lro_flush_inactive(lro, &lro_timeout);
+			}
+#endif
+			if (budget) {
+				FL_LOCK(fl);
+				refill_fl(sc, fl, 32);
+				FL_UNLOCK(fl);
+
+				return (EINPROGRESS);
+			}
+		}
+		if (refill) {
+			FL_LOCK(fl);
+			refill_fl(sc, fl, 32);
+			FL_UNLOCK(fl);
+			fl_hw_cidx = fl->hw_cidx;
+		}
+	}
+out:
+#if defined(INET) || defined(INET6)
 	if (iq->flags & IQ_LRO_ENABLED) {
 		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
 			MPASS(sort_before_lro(lro));
@@ -1585,15 +1680,11 @@ process_iql:
 	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
 
-	if (iq->flags & IQ_HAS_FL) {
-		int starved;
-
-		FL_LOCK(fl);
-		starved = refill_fl(sc, fl, 64);
-		FL_UNLOCK(fl);
-		if (__predict_false(starved != 0))
-			add_fl_to_sfl(sc, fl);
-	}
+	FL_LOCK(fl);
+	starved = refill_fl(sc, fl, 64);
+	FL_UNLOCK(fl);
+	if (__predict_false(starved != 0))
+		add_fl_to_sfl(sc, fl);
 
 	return (0);
 }
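The nm_state field moved above (from struct irq into struct sge_nm_rxq)
carries a small handshake: the interrupt path flips NM_ON -> NM_BUSY around
servicing, and teardown spins until it wins the NM_ON -> NM_OFF transition,
so a queue is never freed mid-service.  Below is a simplified C11 userland
sketch of that protocol, with stdatomic standing in for the kernel's
atomic_cmpset_int and the pause("nmst") retry reduced to a bare loop; it is
an illustration of the state machine, not the driver code.

	#include <stdatomic.h>
	#include <stdio.h>

	enum { NM_OFF, NM_ON, NM_BUSY };

	static _Atomic int nm_state = NM_ON;

	/* Interrupt path: service only if we win the ON -> BUSY race. */
	static void
	nm_intr(void)
	{
		int on = NM_ON;

		if (atomic_compare_exchange_strong(&nm_state, &on, NM_BUSY)) {
			/* ...service_nm_rxq() work would happen here... */
			atomic_store(&nm_state, NM_ON);
		}
		/* else: queue is OFF, or another context is servicing it. */
	}

	/* Teardown path: retry until no interrupt is mid-service. */
	static void
	nm_off(void)
	{
		int on;

		do {
			on = NM_ON;
			/* The driver pauses ("nmst") between attempts. */
		} while (!atomic_compare_exchange_strong(&nm_state, &on,
		    NM_OFF));
	}

	int
	main(void)
	{
		nm_intr();	/* services once: ON -> BUSY -> ON */
		nm_off();	/* ON -> OFF; later interrupts are no-ops */
		nm_intr();	/* does nothing, state is OFF */
		printf("final state: %d (NM_OFF=%d)\n",
		    atomic_load(&nm_state), NM_OFF);
		return (0);
	}

Keeping this state in the queue itself, rather than in the irq, is what lets
a shared vector (t4_vi_intr) drive the netmap and NIC queues independently.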