Date:      Wed, 25 Mar 2015 10:01:07 +0000 (UTC)
From:      Andrew Rybchenko <arybchik@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r280502 - stable/10/sys/dev/sfxge
Message-ID:  <201503251001.t2PA17I6068947@svn.freebsd.org>

Author: arybchik
Date: Wed Mar 25 10:01:07 2015
New Revision: 280502
URL: https://svnweb.freebsd.org/changeset/base/280502

Log:
  MFC: 272328
  
  Make size of Tx and Rx rings configurable
  
  The required event queue size is now calculated from the configured
  ring sizes instead of being fixed.
  
  Submitted by:   Andrew Rybchenko <arybchenko at solarflare.com>
  Sponsored by:   Solarflare Communications, Inc.

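Both new knobs are boot-time tunables (CTLFLAG_RDTUN), so they are set
from loader.conf rather than at runtime. A minimal illustration, using
placeholder values that must be powers of two within the limits the
driver validates at attach:

    # /boot/loader.conf
    hw.sfxge.rx_ring="2048"    # descriptors per receive ring
    hw.sfxge.tx_ring="2048"    # descriptors per transmit ring
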
Modified:
  stable/10/sys/dev/sfxge/sfxge.c
  stable/10/sys/dev/sfxge/sfxge.h
  stable/10/sys/dev/sfxge/sfxge_ev.c
  stable/10/sys/dev/sfxge/sfxge_rx.c
  stable/10/sys/dev/sfxge/sfxge_rx.h
  stable/10/sys/dev/sfxge/sfxge_tx.c
  stable/10/sys/dev/sfxge/sfxge_tx.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/sfxge/sfxge.c
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge.c	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge.c	Wed Mar 25 10:01:07 2015	(r280502)
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>
+#include <sys/syslog.h>
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
@@ -66,6 +67,25 @@ __FBSDID("$FreeBSD$");
 
 MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
 
+
+SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0,
+	    "SFXGE driver parameters");
+
+#define	SFXGE_PARAM_RX_RING	SFXGE_PARAM(rx_ring)
+static int sfxge_rx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN,
+	   &sfxge_rx_ring_entries, 0,
+	   "Maximum number of descriptors in a receive ring");
+
+#define	SFXGE_PARAM_TX_RING	SFXGE_PARAM(tx_ring)
+static int sfxge_tx_ring_entries = SFXGE_NDESCS;
+TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
+	   &sfxge_tx_ring_entries, 0,
+	   "Maximum number of descriptors in a transmit ring");
+
+
 static void
 sfxge_reset(void *arg, int npending);
 
@@ -313,8 +333,8 @@ sfxge_ifnet_init(struct ifnet *ifp, stru
 	ifp->if_qflush = sfxge_if_qflush;
 #else
 	ifp->if_start = sfxge_if_start;
-	IFQ_SET_MAXLEN(&ifp->if_snd, SFXGE_NDESCS - 1);
-	ifp->if_snd.ifq_drv_maxlen = SFXGE_NDESCS - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txq_entries - 1);
+	ifp->if_snd.ifq_drv_maxlen = sc->txq_entries - 1;
 	IFQ_SET_READY(&ifp->if_snd);
 
 	mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
@@ -413,6 +433,26 @@ sfxge_create(struct sfxge_softc *sc)
 		goto fail3;
 	sc->enp = enp;
 
+	if (!ISP2(sfxge_rx_ring_entries) ||
+	    !(sfxge_rx_ring_entries & EFX_RXQ_NDESCS_MASK)) {
+		log(LOG_ERR, "%s=%d must be a power of 2 from %u to %u\n",
+		    SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries,
+		    EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS);
+		error = EINVAL;
+		goto fail_rx_ring_entries;
+	}
+	sc->rxq_entries = sfxge_rx_ring_entries;
+
+	if (!ISP2(sfxge_tx_ring_entries) ||
+	    !(sfxge_tx_ring_entries & EFX_TXQ_NDESCS_MASK)) {
+		log(LOG_ERR, "%s=%d must be a power of 2 from %u to %u\n",
+		    SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries,
+		    EFX_TXQ_MINNDESCS, EFX_TXQ_MAXNDESCS);
+		error = EINVAL;
+		goto fail_tx_ring_entries;
+	}
+	sc->txq_entries = sfxge_tx_ring_entries;
+
 	/* Initialize MCDI to talk to the microcontroller. */
 	if ((error = sfxge_mcdi_init(sc)) != 0)
 		goto fail4;
@@ -485,6 +525,8 @@ fail5:
 	sfxge_mcdi_fini(sc);
 
 fail4:
+fail_tx_ring_entries:
+fail_rx_ring_entries:
 	sc->enp = NULL;
 	efx_nic_destroy(enp);
 	mtx_destroy(&sc->enp_lock);

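The validation above packs two tests into one expression. Assuming the
EFX_RXQ_NDESCS_MASK / EFX_TXQ_NDESCS_MASK constants carry one set bit
for every supported power-of-two ring size (an assumption about the efx
headers, which this diff does not show), ISP2() plus a single AND
rejects both non-powers-of-two and out-of-range values. A standalone
sketch with made-up mask values:

    #include <stdio.h>

    /* Same shape as the driver's ISP2(): true for 0 and powers of two. */
    #define ISP2(x)            (((x) & ((x) - 1)) == 0)

    /* Hypothetical mask: one bit per supported size, here 512..4096. */
    #define RXQ_NDESCS_MASK    (512 | 1024 | 2048 | 4096)

    static int
    ring_size_ok(unsigned int n)
    {
        /* A power of two has exactly one bit set; it is valid iff
         * that bit lands inside the mask (0 fails the mask test). */
        return (ISP2(n) && (n & RXQ_NDESCS_MASK) != 0);
    }

    int
    main(void)
    {
        printf("%d %d %d\n", ring_size_ok(1024),    /* 1 */
            ring_size_ok(1000),     /* 0: not a power of two */
            ring_size_ok(8192));    /* 0: outside the mask */
        return (0);
    }
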
Modified: stable/10/sys/dev/sfxge/sfxge.h
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge.h	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge.h	Wed Mar 25 10:01:07 2015	(r280502)
@@ -86,6 +86,8 @@
 #include "sfxge_rx.h"
 #include "sfxge_tx.h"
 
+#define	ROUNDUP_POW_OF_TWO(_n)	(1ULL << flsl((_n) - 1))
+
 #define	SFXGE_IP_ALIGN	2
 
 #define	SFXGE_ETHERTYPE_LOOPBACK	0x9000	/* Xerox loopback */
@@ -105,6 +107,7 @@ struct sfxge_evq {
 
 	enum sfxge_evq_state	init_state;
 	unsigned int		index;
+	unsigned int		entries;
 	efsys_mem_t		mem;
 	unsigned int		buf_base_id;
 
@@ -120,7 +123,6 @@ struct sfxge_evq {
 	struct sfxge_txq	**txqs;
 };
 
-#define	SFXGE_NEVS	4096
 #define	SFXGE_NDESCS	1024
 #define	SFXGE_MODERATION	30
 
@@ -208,6 +210,9 @@ struct sfxge_softc {
 	efx_nic_t			*enp;
 	struct mtx			enp_lock;
 
+	unsigned int			rxq_entries;
+	unsigned int			txq_entries;
+
 	bus_dma_tag_t			parent_dma_tag;
 	efsys_bar_t			bar;
 
@@ -245,6 +250,10 @@ struct sfxge_softc {
 #define	SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
 #define	SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
 
+#define	SFXGE_PARAM(_name)	"hw.sfxge." #_name
+
+SYSCTL_DECL(_hw_sfxge);
+
 /*
  * From sfxge.c.
  */

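The new ROUNDUP_POW_OF_TWO() macro rounds its argument up to the next
power of two by taking flsl() of (_n - 1). A userland sketch of the
same computation, with a portable stand-in for the kernel's flsl():

    #include <stdio.h>

    /* Stand-in for flsl(): 1-based index of the highest set bit. */
    static int
    flsl_sketch(long v)
    {
        int bit = 0;

        while (v != 0) {
            bit++;
            v >>= 1;
        }
        return (bit);
    }

    #define ROUNDUP_POW_OF_TWO(_n)    (1ULL << flsl_sketch((_n) - 1))

    int
    main(void)
    {
        /* Prints "8192 4096 4096": an exact power of two maps to
         * itself because of the "- 1". */
        printf("%llu %llu %llu\n", ROUNDUP_POW_OF_TWO(4224),
            ROUNDUP_POW_OF_TWO(2176), ROUNDUP_POW_OF_TWO(4096));
        return (0);
    }
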
Modified: stable/10/sys/dev/sfxge/sfxge_ev.c
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge_ev.c	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge_ev.c	Wed Mar 25 10:01:07 2015	(r280502)
@@ -97,7 +97,7 @@ sfxge_ev_rx(void *arg, uint32_t label, u
 	if (rxq->init_state != SFXGE_RXQ_STARTED)
 		goto done;
 
-	expected = rxq->pending++ & (SFXGE_NDESCS - 1);
+	expected = rxq->pending++ & rxq->ptr_mask;
 	if (id != expected) {
 		evq->exception = B_TRUE;
 
@@ -242,10 +242,10 @@ sfxge_ev_tx(void *arg, uint32_t label, u
 	if (txq->init_state != SFXGE_TXQ_STARTED)
 		goto done;
 
-	stop = (id + 1) & (SFXGE_NDESCS - 1);
-	id = txq->pending & (SFXGE_NDESCS - 1);
+	stop = (id + 1) & txq->ptr_mask;
+	id = txq->pending & txq->ptr_mask;
 
-	delta = (stop >= id) ? (stop - id) : (SFXGE_NDESCS - id + stop);
+	delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
 	txq->pending += delta;
 
 	evq->tx_done++;
@@ -630,7 +630,7 @@ sfxge_ev_qstop(struct sfxge_softc *sc, u
 
 	efx_ev_qdestroy(evq->common);
 	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
-	    EFX_EVQ_NBUFS(SFXGE_NEVS));
+	    EFX_EVQ_NBUFS(evq->entries));
 	mtx_unlock(&evq->lock);
 }
 
@@ -649,15 +649,15 @@ sfxge_ev_qstart(struct sfxge_softc *sc, 
 	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
 
 	/* Clear all events. */
-	(void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));
+	(void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
 
 	/* Program the buffer table. */
 	if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
-	    EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
-		return rc;
+	    EFX_EVQ_NBUFS(evq->entries))) != 0)
+		return (rc);
 
 	/* Create the common code event queue. */
-	if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
+	if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
 	    evq->buf_base_id, &evq->common)) != 0)
 		goto fail;
 
@@ -700,7 +700,7 @@ fail2:
 	efx_ev_qdestroy(evq->common);
 fail:
 	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
-	    EFX_EVQ_NBUFS(SFXGE_NEVS));
+	    EFX_EVQ_NBUFS(evq->entries));
 
 	return (rc);
 }
@@ -797,15 +797,31 @@ sfxge_ev_qinit(struct sfxge_softc *sc, u
 	sc->evq[index] = evq;
 	esmp = &evq->mem;
 
+	/* Build an event queue with room for one event per tx and rx buffer,
+	 * plus some extra for link state events and MCDI completions.
+	 * There are three tx queues in the first event queue and one in
+	 * each of the others.
+	 */
+	if (index == 0)
+		evq->entries =
+			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+					   3 * sc->txq_entries +
+					   128);
+	else
+		evq->entries =
+			ROUNDUP_POW_OF_TWO(sc->rxq_entries +
+					   sc->txq_entries +
+					   128);
+
 	/* Initialise TX completion list */
 	evq->txqs = &evq->txq;
 
 	/* Allocate DMA space. */
-	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
+	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
 		return (rc);
 
 	/* Allocate buffer table entries. */
-	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
+	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
 				 &evq->buf_base_id);
 
 	mtx_init(&evq->lock, "evq", NULL, MTX_DEF);

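With the default 1024-entry rings this sizing reproduces the old fixed
value for all but the first queue: event queue 0 needs room for
1024 + 3 * 1024 + 128 = 4224 events, which ROUNDUP_POW_OF_TWO() takes
to 8192, while every other queue needs 1024 + 1024 + 128 = 2176,
rounded up to 4096, exactly the removed SFXGE_NEVS constant.
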
Modified: stable/10/sys/dev/sfxge/sfxge_rx.c
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge_rx.c	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge_rx.c	Wed Mar 25 10:01:07 2015	(r280502)
@@ -54,8 +54,7 @@ __FBSDID("$FreeBSD$");
 #include "sfxge.h"
 #include "sfxge_rx.h"
 
-#define	RX_REFILL_THRESHOLD	(EFX_RXQ_LIMIT(SFXGE_NDESCS) * 9 / 10)
-#define	RX_REFILL_THRESHOLD_2	(RX_REFILL_THRESHOLD / 2)
+#define	RX_REFILL_THRESHOLD(_entries)	(EFX_RXQ_LIMIT(_entries) * 9 / 10)
 
 /* Size of the LRO hash table.  Must be a power of 2.  A larger table
  * means we can accelerate a larger number of streams.
@@ -214,11 +213,11 @@ sfxge_rx_qfill(struct sfxge_rxq *rxq, un
 		return;
 
 	rxfill = rxq->added - rxq->completed;
-	KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
-	    ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
-	ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
-	KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
-	    ("ntodo > EFX_RQX_LIMIT(SFXGE_NDESCS)"));
+	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
+	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
+	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
+	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
+	    ("ntodo > EFX_RQX_LIMIT(rxq->entries)"));
 
 	if (ntodo == 0)
 		return;
@@ -231,7 +230,7 @@ sfxge_rx_qfill(struct sfxge_rxq *rxq, un
 		bus_dma_segment_t seg;
 		struct mbuf *m;
 
-		id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
+		id = (rxq->added + batch) & rxq->ptr_mask;
 		rx_desc = &rxq->queue[id];
 		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));
 
@@ -274,7 +273,7 @@ sfxge_rx_qrefill(struct sfxge_rxq *rxq)
 		return;
 
 	/* Make sure the queue is full */
-	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_TRUE);
+	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
 }
 
 static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
@@ -757,7 +756,7 @@ sfxge_rx_qcomplete(struct sfxge_rxq *rxq
 		unsigned int id;
 		struct sfxge_rx_sw_desc *rx_desc;
 
-		id = completed++ & (SFXGE_NDESCS - 1);
+		id = completed++ & rxq->ptr_mask;
 		rx_desc = &rxq->queue[id];
 		m = rx_desc->mbuf;
 
@@ -821,8 +820,8 @@ discard:
 		sfxge_lro_end_of_burst(rxq);
 
 	/* Top up the queue if necessary */
-	if (level < RX_REFILL_THRESHOLD)
-		sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+	if (level < rxq->refill_threshold)
+		sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
 }
 
 static void
@@ -884,7 +883,7 @@ again:
 	efx_rx_qdestroy(rxq->common);
 
 	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
-	    EFX_RXQ_NBUFS(SFXGE_NDESCS));
+	    EFX_RXQ_NBUFS(sc->rxq_entries));
 
 	mtx_unlock(&evq->lock);
 }
@@ -908,12 +907,12 @@ sfxge_rx_qstart(struct sfxge_softc *sc, 
 
 	/* Program the buffer table. */
 	if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
-	    EFX_RXQ_NBUFS(SFXGE_NDESCS))) != 0)
-		return rc;
+	    EFX_RXQ_NBUFS(sc->rxq_entries))) != 0)
+		return (rc);
 
 	/* Create the common code receive queue. */
 	if ((rc = efx_rx_qcreate(sc->enp, index, index, EFX_RXQ_TYPE_DEFAULT,
-	    esmp, SFXGE_NDESCS, rxq->buf_base_id, evq->common,
+	    esmp, sc->rxq_entries, rxq->buf_base_id, evq->common,
 	    &rxq->common)) != 0)
 		goto fail;
 
@@ -925,7 +924,7 @@ sfxge_rx_qstart(struct sfxge_softc *sc, 
 	rxq->init_state = SFXGE_RXQ_STARTED;
 
 	/* Try to fill the queue from the pool. */
-	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(SFXGE_NDESCS), B_FALSE);
+	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);
 
 	mtx_unlock(&evq->lock);
 
@@ -933,8 +932,8 @@ sfxge_rx_qstart(struct sfxge_softc *sc, 
 
 fail:
 	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
-	    EFX_RXQ_NBUFS(SFXGE_NDESCS));
-	return rc;
+	    EFX_RXQ_NBUFS(sc->rxq_entries));
+	return (rc);
 }
 
 void
@@ -1105,6 +1104,9 @@ sfxge_rx_qinit(struct sfxge_softc *sc, u
 	rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
 	rxq->sc = sc;
 	rxq->index = index;
+	rxq->entries = sc->rxq_entries;
+	rxq->ptr_mask = rxq->entries - 1;
+	rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries);
 
 	sc->rxq[index] = rxq;
 	esmp = &rxq->mem;
@@ -1112,16 +1114,16 @@ sfxge_rx_qinit(struct sfxge_softc *sc, u
 	evq = sc->evq[index];
 
 	/* Allocate and zero DMA space. */
-	if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+	if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0)
 		return (rc);
-	(void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(SFXGE_NDESCS));
+	(void)memset(esmp->esm_base, 0, EFX_RXQ_SIZE(sc->rxq_entries));
 
 	/* Allocate buffer table entries. */
-	sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(SFXGE_NDESCS),
+	sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries),
 				 &rxq->buf_base_id);
 
 	/* Allocate the context array and the flow table. */
-	rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * SFXGE_NDESCS,
+	rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
 	    M_SFXGE, M_WAITOK | M_ZERO);
 	sfxge_lro_init(rxq);
 

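The recurring "& rxq->ptr_mask" pattern above is the reason the ring
sizes must be powers of two: when entries is a power of two, masking a
free-running counter with entries - 1 is equivalent to reducing it
modulo entries. A minimal sketch:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int entries = 1024;            /* power of two */
        unsigned int ptr_mask = entries - 1;    /* 0x3ff */
        unsigned int completed = 1030;          /* free-running counter */

        /* The driver's added/completed/pending counters never wrap
         * explicitly; masking maps them onto ring slots. Prints "6 6". */
        printf("%u %u\n", completed & ptr_mask, completed % entries);
        return (0);
    }
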
Modified: stable/10/sys/dev/sfxge/sfxge_rx.h
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge_rx.h	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge_rx.h	Wed Mar 25 10:01:07 2015	(r280502)
@@ -159,6 +159,8 @@ struct sfxge_rxq {
 	efsys_mem_t			mem;
 	unsigned int			buf_base_id;
 	enum sfxge_rxq_state		init_state;
+	unsigned int			entries;
+	unsigned int			ptr_mask;
 
 	struct sfxge_rx_sw_desc		*queue __aligned(CACHE_LINE_SIZE);
 	unsigned int			added;
@@ -166,6 +168,7 @@ struct sfxge_rxq {
 	unsigned int			completed;
 	unsigned int			loopback;
 	struct sfxge_lro_state		lro;
+	unsigned int			refill_threshold;
 	struct callout			refill_callout;
 	unsigned int			refill_delay;
 

Modified: stable/10/sys/dev/sfxge/sfxge_tx.c
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge_tx.c	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge_tx.c	Wed Mar 25 10:01:07 2015	(r280502)
@@ -75,7 +75,7 @@ __FBSDID("$FreeBSD$");
  * minimum MSS of 512.
  */
 #define	SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
-#define	SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
+#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)	((_entries) - SFXGE_TSO_MAX_DESC)
 
 /* Forward declarations. */
 static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
@@ -101,7 +101,7 @@ sfxge_tx_qcomplete(struct sfxge_txq *txq
 		struct sfxge_tx_mapping *stmp;
 		unsigned int id;
 
-		id = completed++ & (SFXGE_NDESCS - 1);
+		id = completed++ & txq->ptr_mask;
 
 		stmp = &txq->stmp[id];
 		if (stmp->flags & TX_BUF_UNMAP) {
@@ -125,7 +125,7 @@ sfxge_tx_qcomplete(struct sfxge_txq *txq
 		unsigned int level;
 
 		level = txq->added - txq->completed;
-		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
 			sfxge_tx_qunblock(txq);
 	}
 }
@@ -218,19 +218,19 @@ sfxge_tx_qlist_post(struct sfxge_txq *tx
 		("efx_tx_qpost() refragmented descriptors"));
 
 	level = txq->added - txq->reaped;
-	KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));
+	KASSERT(level <= txq->entries, ("overfilled TX queue"));
 
 	/* Clear the fragment list. */
 	txq->n_pend_desc = 0;
 
 	/* Have we reached the block level? */
-	if (level < SFXGE_TXQ_BLOCK_LEVEL)
+	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
 		return;
 
 	/* Reap, and check again */
 	sfxge_tx_qreap(txq);
 	level = txq->added - txq->reaped;
-	if (level < SFXGE_TXQ_BLOCK_LEVEL)
+	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
 		return;
 
 	txq->blocked = 1;
@@ -242,7 +242,7 @@ sfxge_tx_qlist_post(struct sfxge_txq *tx
 	mb();
 	sfxge_tx_qreap(txq);
 	level = txq->added - txq->reaped;
-	if (level < SFXGE_TXQ_BLOCK_LEVEL) {
+	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
 		mb();
 		txq->blocked = 0;
 	}
@@ -271,7 +271,7 @@ static int sfxge_tx_queue_mbuf(struct sf
 	}
 
 	/* Load the packet for DMA. */
-	id = txq->added & (SFXGE_NDESCS - 1);
+	id = txq->added & txq->ptr_mask;
 	stmp = &txq->stmp[id];
 	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
 				     mbuf, dma_seg, &n_dma_seg, 0);
@@ -318,7 +318,7 @@ static int sfxge_tx_queue_mbuf(struct sf
 
 			stmp->flags = 0;
 			if (__predict_false(stmp ==
-					    &txq->stmp[SFXGE_NDESCS - 1]))
+					    &txq->stmp[txq->ptr_mask]))
 				stmp = &txq->stmp[0];
 			else
 				stmp++;
@@ -762,20 +762,22 @@ static inline const struct tcphdr *tso_t
  * a TSO header buffer, since they must always be followed by a
  * payload descriptor referring to an mbuf.
  */
-#define	TSOH_COUNT	(SFXGE_NDESCS / 2u)
+#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
 #define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
-#define	TSOH_PAGE_COUNT	((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
+#define	TSOH_PAGE_COUNT(_txq_entries)	\
+	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
 
 static int tso_init(struct sfxge_txq *txq)
 {
 	struct sfxge_softc *sc = txq->sc;
+	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
 	int i, rc;
 
 	/* Allocate TSO header buffers */
-	txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
+	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
 				  M_SFXGE, M_WAITOK);
 
-	for (i = 0; i < TSOH_PAGE_COUNT; i++) {
+	for (i = 0; i < tsoh_page_count; i++) {
 		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
 		if (rc != 0)
 			goto fail;
@@ -796,7 +798,7 @@ static void tso_fini(struct sfxge_txq *t
 	int i;
 
 	if (txq->tsoh_buffer != NULL) {
-		for (i = 0; i < TSOH_PAGE_COUNT; i++)
+		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
 			sfxge_dma_free(&txq->tsoh_buffer[i]);
 		free(txq->tsoh_buffer, M_SFXGE);
 	}
@@ -1010,12 +1012,12 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq
 		tso.dma_addr = dma_seg->ds_addr + tso.header_len;
 	}
 
-	id = txq->added & (SFXGE_NDESCS - 1);
+	id = txq->added & txq->ptr_mask;
 	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
-		return -1;
+		return (-1);
 
 	while (1) {
-		id = (id + 1) & (SFXGE_NDESCS - 1);
+		id = (id + 1) & txq->ptr_mask;
 		tso_fill_packet_with_fragment(txq, &tso);
 
 		/* Move onto the next fragment? */
@@ -1038,7 +1040,7 @@ sfxge_tx_queue_tso(struct sfxge_txq *txq
 			if (txq->n_pend_desc >
 			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
 				break;
-			next_id = (id + 1) & (SFXGE_NDESCS - 1);
+			next_id = (id + 1) & txq->ptr_mask;
 			if (__predict_false(tso_start_new_packet(txq, &tso,
 								 next_id)))
 				break;
@@ -1070,7 +1072,7 @@ sfxge_tx_qunblock(struct sfxge_txq *txq)
 		unsigned int level;
 
 		level = txq->added - txq->completed;
-		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
+		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
 			txq->blocked = 0;
 	}
 
@@ -1146,7 +1148,7 @@ sfxge_tx_qstop(struct sfxge_softc *sc, u
 	txq->common = NULL;
 
 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
-	    EFX_TXQ_NBUFS(SFXGE_NDESCS));
+	    EFX_TXQ_NBUFS(sc->txq_entries));
 
 	mtx_unlock(&evq->lock);
 	mtx_unlock(SFXGE_TXQ_LOCK(txq));
@@ -1172,8 +1174,8 @@ sfxge_tx_qstart(struct sfxge_softc *sc, 
 
 	/* Program the buffer table. */
 	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
-	    EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
-		return rc;
+	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
+		return (rc);
 
 	/* Determine the kind of queue we are creating. */
 	switch (txq->type) {
@@ -1194,7 +1196,7 @@ sfxge_tx_qstart(struct sfxge_softc *sc, 
 
 	/* Create the common code transmit queue. */
 	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
-	    SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
+	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
 	    &txq->common)) != 0)
 		goto fail;
 
@@ -1211,8 +1213,8 @@ sfxge_tx_qstart(struct sfxge_softc *sc, 
 
 fail:
 	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
-	    EFX_TXQ_NBUFS(SFXGE_NDESCS));
-	return rc;
+	    EFX_TXQ_NBUFS(sc->txq_entries));
+	return (rc);
 }
 
 void
@@ -1280,7 +1282,7 @@ static void
 sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
 {
 	struct sfxge_txq *txq;
-	unsigned int nmaps = SFXGE_NDESCS;
+	unsigned int nmaps;
 
 	txq = sc->txq[index];
 
@@ -1292,6 +1294,7 @@ sfxge_tx_qfini(struct sfxge_softc *sc, u
 
 	/* Free the context arrays. */
 	free(txq->pend_desc, M_SFXGE);
+	nmaps = sc->txq_entries;
 	while (nmaps-- != 0)
 		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
 	free(txq->stmp, M_SFXGE);
@@ -1323,6 +1326,8 @@ sfxge_tx_qinit(struct sfxge_softc *sc, u
 
 	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
 	txq->sc = sc;
+	txq->entries = sc->txq_entries;
+	txq->ptr_mask = txq->entries - 1;
 
 	sc->txq[txq_index] = txq;
 	esmp = &txq->mem;
@@ -1330,12 +1335,12 @@ sfxge_tx_qinit(struct sfxge_softc *sc, u
 	evq = sc->evq[evq_index];
 
 	/* Allocate and zero DMA space for the descriptor ring. */
-	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
+	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
 		return (rc);
-	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));
+	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));
 
 	/* Allocate buffer table entries. */
-	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
+	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
 				 &txq->buf_base_id);
 
 	/* Create a DMA tag for packet mappings. */
@@ -1349,13 +1354,13 @@ sfxge_tx_qinit(struct sfxge_softc *sc, u
 	}
 
 	/* Allocate pending descriptor array for batching writes. */
-	txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
+	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
 				M_SFXGE, M_ZERO | M_WAITOK);
 
 	/* Allocate and initialise mbuf DMA mapping array. */
-	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
+	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
 	    M_SFXGE, M_ZERO | M_WAITOK);
-	for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
+	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
 		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
 				       &txq->stmp[nmaps].map);
 		if (rc != 0)

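The TSOH macros now scale with the ring too: TSOH_COUNT(n) reserves one
TSO header buffer per two descriptors, and TSOH_PAGE_COUNT(n) is the
number of pages needed to hold them. Assuming 4 KB pages and the
driver's 128-byte TSOH_STD_SIZE (neither value appears in this diff),
TSOH_PER_PAGE is 32, so a 1024-entry ring needs 512 / 32 = 16 header
pages and a 2048-entry ring needs 32.
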
Modified: stable/10/sys/dev/sfxge/sfxge_tx.h
==============================================================================
--- stable/10/sys/dev/sfxge/sfxge_tx.h	Wed Mar 25 09:59:38 2015	(r280501)
+++ stable/10/sys/dev/sfxge/sfxge_tx.h	Wed Mar 25 10:01:07 2015	(r280502)
@@ -106,7 +106,7 @@ enum sfxge_txq_type {
 	SFXGE_TXQ_NTYPES
 };
 
-#define	SFXGE_TXQ_UNBLOCK_LEVEL		(EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)
+#define	SFXGE_TXQ_UNBLOCK_LEVEL(_entries)	(EFX_TXQ_LIMIT(_entries) / 4)
 
 #define	SFXGE_TX_BATCH	64
 
@@ -128,6 +128,8 @@ struct sfxge_txq {
 	unsigned int			evq_index;
 	efsys_mem_t			mem;
 	unsigned int			buf_base_id;
+	unsigned int			entries;
+	unsigned int			ptr_mask;
 
 	struct sfxge_tx_mapping		*stmp;	/* Packets in flight. */
 	bus_dma_tag_t			packet_dma_tag;


