Date:      Thu, 31 Mar 2011 22:40:44 +0000 (UTC)
From:      David Christensen <davidch@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r220229 - head/sys/dev/bxe
Message-ID:  <201103312240.p2VMeiEb024871@svn.freebsd.org>

Author: davidch
Date: Thu Mar 31 22:40:44 2011
New Revision: 220229
URL: http://svn.freebsd.org/changeset/base/220229

Log:
  - Freshened debug support code.
  - Renamed several RX variables for more consistent usage.
  - Fixed a potential problem when masking the RX CQ producer value.
  
  MFC after:	One week.

Modified:
  head/sys/dev/bxe/if_bxe.c
  head/sys/dev/bxe/if_bxe.h

Modified: head/sys/dev/bxe/if_bxe.c
==============================================================================
--- head/sys/dev/bxe/if_bxe.c	Thu Mar 31 22:04:00 2011	(r220228)
+++ head/sys/dev/bxe/if_bxe.c	Thu Mar 31 22:40:44 2011	(r220229)
@@ -68,7 +68,8 @@ __FBSDID("$FreeBSD$");
 
 /* BXE Debug Options */
 #ifdef BXE_DEBUG
-	uint32_t bxe_debug = BXE_INFO;
+uint32_t bxe_debug = BXE_INFO;
+
 
 /*          0 = Never              */
 /*          1 = 1 in 2,147,483,648 */
@@ -345,8 +346,8 @@ int  bxe_set_gpio_int(struct bxe_softc *
 static int bxe_sysctl_driver_state(SYSCTL_HANDLER_ARGS);
 static int bxe_sysctl_hw_state(SYSCTL_HANDLER_ARGS);
 static int bxe_sysctl_dump_fw(SYSCTL_HANDLER_ARGS);
-static int bxe_sysctl_dump_cqe_chain(SYSCTL_HANDLER_ARGS);
-static int bxe_sysctl_dump_rx_chain(SYSCTL_HANDLER_ARGS);
+static int bxe_sysctl_dump_rx_cq_chain(SYSCTL_HANDLER_ARGS);
+static int bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS);
 static int bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS);
 static int bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
 static int bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS);
@@ -357,16 +358,16 @@ static void bxe_dump_enet(struct bxe_sof
 static void bxe_dump_mbuf (struct bxe_softc *, struct mbuf *);
 static void bxe_dump_tx_mbuf_chain(struct bxe_softc *, int, int);
 static void bxe_dump_rx_mbuf_chain(struct bxe_softc *, int, int);
-static void bxe_dump_pbd_locked(struct bxe_softc *,int,
+static void bxe_dump_tx_parsing_bd(struct bxe_fastpath *,int,
 	    struct eth_tx_parse_bd *);
-static void bxe_dump_txbd_locked(struct bxe_fastpath *, int,
-	    struct eth_tx_bd *);
-static void bxe_dump_rxbd_locked(struct bxe_fastpath *, int,
+static void bxe_dump_txbd(struct bxe_fastpath *, int,
+	    union eth_tx_bd_types *);
+static void bxe_dump_rxbd(struct bxe_fastpath *, int,
 	    struct eth_rx_bd *);
-static void bxe_dump_cqe_locked(struct bxe_fastpath *, int, union eth_rx_cqe *);
+static void bxe_dump_cqe(struct bxe_fastpath *, int, union eth_rx_cqe *);
 static void bxe_dump_tx_chain(struct bxe_fastpath *, int, int);
-static void bxe_dump_cqe_chain(struct bxe_fastpath *, int, int);
-static void bxe_dump_rx_chain(struct bxe_fastpath *, int, int);
+static void bxe_dump_rx_cq_chain(struct bxe_fastpath *, int, int);
+static void bxe_dump_rx_bd_chain(struct bxe_fastpath *, int, int);
 static void bxe_dump_status_block(struct bxe_softc *);
 static void bxe_dump_stats_block(struct bxe_softc *);
 static void bxe_dump_fp_state(struct bxe_fastpath *);
@@ -404,6 +405,7 @@ static device_method_t bxe_methods[] = {
 	KOBJMETHOD_END
 };
 
+
 static driver_t bxe_driver = {
 	"bxe",
 	bxe_methods,
@@ -1725,11 +1727,11 @@ bxe_attach(device_t dev)
 	 * processor memory.
 	 */
 	rid = PCIR_BAR(0);
-	sc->bxe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
-		&rid, RF_ACTIVE);
+	sc->bxe_res = bus_alloc_resource_any(dev,
+	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
 	if (sc->bxe_res == NULL) {
 		BXE_PRINTF("%s(%d):PCI BAR0 memory allocation failed\n",
-			__FILE__, __LINE__);
+		    __FILE__, __LINE__);
 		rc = ENXIO;
 		goto bxe_attach_fail;
 	}
@@ -1744,11 +1746,11 @@ bxe_attach(device_t dev)
 	 * Doorbell (DB) memory.
 	 */
 	rid = PCIR_BAR(2);
-	sc->bxe_db_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
-		&rid, RF_ACTIVE);
+	sc->bxe_db_res = bus_alloc_resource_any(dev,
+	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
 	if (sc->bxe_db_res == NULL) {
 		BXE_PRINTF("%s(%d): PCI BAR2 memory allocation failed\n",
-			__FILE__, __LINE__);
+		    __FILE__, __LINE__);
 		rc = ENXIO;
 		goto bxe_attach_fail;
 	}
@@ -1834,7 +1836,6 @@ bxe_attach(device_t dev)
 
 
 #ifdef BXE_DEBUG
-
 	/* Allocate a memory buffer for grcdump output.*/
 	sc->grcdump_buffer = malloc(BXE_GRCDUMP_BUF_SIZE, M_TEMP, M_NOWAIT);
 	if (sc->grcdump_buffer == NULL) {
@@ -3738,8 +3739,8 @@ bxe_init_locked(struct bxe_softc *sc, in
 	callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
 	/* Everything went OK, go ahead and exit. */
 	goto bxe_init_locked_exit;
+
 	/* Try and gracefully shutdown the device because of a failure. */
-/* Try and gracefully shutdown the device because of a failure. */
 bxe_init_locked_failed4:
 
 	for (i = 1; i < sc->num_queues; i++)
@@ -4413,6 +4414,7 @@ bxe_write_dmae(struct bxe_softc *sc, bus
 
 	DELAY(50);
 
+	/* Wait up to 200ms. */
 	timeout = 4000;
 	while (*wb_comp != BXE_WB_COMP_VAL) {
 		if (!timeout) {
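
[Note: the new "Wait up to 200ms." comment matches the polling parameters visible here, assuming each pass through the completion-wait loop delays 50 microseconds as the DELAY(50) call above suggests: 4000 iterations x 50 us = 200,000 us = 200 ms.]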
@@ -4433,18 +4435,18 @@ bxe_write_dmae_exit:
 
 
 /*
-* Perform a DMAE read from to device memory.
-*
+ * Perform a DMAE read from to device memory.
+ *
  * Some of the registers on the 577XX controller are 128bits wide.  It is
  * required that when accessing those registers that they be read
  * atomically and that no intervening bus acceses to the device occur.
  * This could be handled by a lock held across all driver instances for
  * the device or it can be handled by performing a DMA operation when
  * reading from the device.  This code implements the latter.
-*
-* Returns:
-*   None.
-*/
+ *
+ * Returns:
+ *   None.
+ */
 void
 bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr,
     uint32_t len32)
@@ -7244,7 +7246,8 @@ bxe_stats_init(struct bxe_softc *sc)
 		    sizeof(struct ustorm_per_client_stats));
 		memset(&fp->old_xclient, 0,
 		    sizeof(struct xstorm_per_client_stats));
-		memset(&fp->eth_q_stats, 0, sizeof(struct bxe_q_stats));
+		memset(&fp->eth_q_stats, 0,
+		    sizeof(struct bxe_q_stats));
 	}
 
 	sc->stats_state = STATS_STATE_DISABLED;
@@ -8539,6 +8542,7 @@ bxe_chktso_window(struct bxe_softc* sc, 
 	return (defrag);
 }
 
+
 /*
  * Encapsultes an mbuf cluster into the tx_bd chain structure and
  * makes the memory visible to the controller.
@@ -8557,7 +8561,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 	bus_dma_segment_t segs[32];
 	bus_dmamap_t map;
 	struct mbuf *m0;
-	struct eth_tx_parse_bd *pbd;
+	struct eth_tx_parse_bd *tx_parse_bd;
 	struct eth_tx_bd *tx_data_bd;
 	struct eth_tx_bd *tx_total_pkt_size_bd;
 	struct eth_tx_start_bd *tx_start_bd;
@@ -8580,7 +8584,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 	tx_total_pkt_size_bd = NULL;
 	tx_start_bd = NULL;
 	tx_data_bd = NULL;
-	pbd = NULL;
+	tx_parse_bd = NULL;
 
 	pkt_prod = fp->tx_pkt_prod;
 	bd_prod = TX_BD(fp->tx_bd_prod);
@@ -8693,6 +8697,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 				}
 				break;
 			}
+
 			/* Last try */
 			if (m0->m_pkthdr.csum_flags & CSUM_TSO){
 				if (bxe_chktso_window(sc,nsegs,segs,m0))
@@ -8765,11 +8770,11 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 	 * however, it is only used for tso & chksum.
 	 */
 	bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
-	pbd = (struct eth_tx_parse_bd *)
+	tx_parse_bd = (struct eth_tx_parse_bd *)
 		   &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].parse_bd;
-	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+	memset(tx_parse_bd, 0, sizeof(struct eth_tx_parse_bd));
 
-	/* Gather all info about the packet and add to pbd */
+	/* Gather all info about the packet and add to tx_parse_bd */
 	if (m0->m_pkthdr.csum_flags) {
 		struct ether_vlan_header *eh;
 		struct ip *ip = NULL;
@@ -8790,8 +8795,9 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 		}
 
 		/* Set the Ethernet header length in 16 bit words. */
-		pbd->global_data = (e_hlen + ovlan) >> 1;
-		pbd->global_data |= ((m0->m_flags & M_VLANTAG) << ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
+		tx_parse_bd->global_data = (e_hlen + ovlan) >> 1;
+		tx_parse_bd->global_data |= ((m0->m_flags & M_VLANTAG) <<
+		    ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
 
 		switch (etype) {
 		case ETHERTYPE_IP:{
@@ -8802,10 +8808,10 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 				ip = (struct ip *)(m0->m_data + e_hlen);
 
 			/* Calculate IP header length (16 bit words). */
-			pbd->ip_hlen = (ip->ip_hl << 1);
+			tx_parse_bd->ip_hlen = (ip->ip_hl << 1);
 
 			/* Calculate enet + IP header length (16 bit words). */
-			pbd->total_hlen = pbd->ip_hlen + (e_hlen >> 1);
+			tx_parse_bd->total_hlen = tx_parse_bd->ip_hlen + (e_hlen >> 1);
 
 			if (m0->m_pkthdr.csum_flags & CSUM_IP) {
 				DBPRINT(sc, BXE_EXTREME_SEND, "%s(): IP checksum "
@@ -8816,7 +8822,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 
 			/* Handle any checksums requested by the stack. */
 			if ((m0->m_pkthdr.csum_flags & CSUM_TCP)||
-				(m0->m_pkthdr.csum_flags & CSUM_TSO)){
+			    (m0->m_pkthdr.csum_flags & CSUM_TSO)){
 
 				/* Perform TCP checksum offload. */
 				DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TCP checksum "
@@ -8830,10 +8836,10 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 				DBRUN(sc->debug_tcp_csum_offload_frames++);
 
 				/* Update the enet + IP + TCP header length. */
-				pbd->total_hlen += (uint16_t)(th->th_off << 1);
+				tx_parse_bd->total_hlen += (uint16_t)(th->th_off << 1);
 
 				/* Get the pseudo header checksum. */
-				pbd->tcp_pseudo_csum = ntohs(th->th_sum);
+				tx_parse_bd->tcp_pseudo_csum = ntohs(th->th_sum);
 			} else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
 				/*
 				 * The hardware doesn't actually support UDP checksum
@@ -8861,7 +8867,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 				/* Add the TCP checksum offload flag for UDP frames too. */
 				flags |= ETH_TX_BD_FLAGS_L4_CSUM;
 				DBRUN(sc->debug_udp_csum_offload_frames++);
-				pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
+				tx_parse_bd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
 
 				/* Get a pointer to the UDP header. */
 				uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
@@ -8878,8 +8884,8 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 					ntohl((*(tmp_uh + 2)) & 0x0000FFFF));
 
 				/* Update the enet + IP + UDP header length. */
-				pbd->total_hlen += (sizeof(struct udphdr) >> 1);
-				pbd->tcp_pseudo_csum = ~in_addword(uh->uh_sum, ~tmp_csum);
+				tx_parse_bd->total_hlen += (sizeof(struct udphdr) >> 1);
+				tx_parse_bd->tcp_pseudo_csum = ~in_addword(uh->uh_sum, ~tmp_csum);
 			}
 
 			/* Update the flags settings for VLAN/Offload. */
@@ -8900,9 +8906,10 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 		/* Setup the Parsing BD with TSO specific info */
 		if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
 
-			uint16_t hdr_len = pbd->total_hlen << 1;
+			uint16_t hdr_len = tx_parse_bd->total_hlen << 1;
 
-			DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TSO is enabled.\n",__FUNCTION__);
+			DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TSO is enabled.\n",
+			    __FUNCTION__);
 
 			tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 
@@ -8918,9 +8925,9 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 					__FUNCTION__, tx_start_bd->nbytes, tx_start_bd->addr_hi,
 					tx_start_bd->addr_lo, nbds);
 
-	bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
+				bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
 
-				/* Get a new transmit BD (after the pbd) and fill it. */
+				/* Get a new transmit BD (after the tx_parse_bd) and fill it. */
 				tx_data_bd = &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].reg_bd;
 				tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hdr_len));
 				tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hdr_len));
@@ -8934,23 +8941,26 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 				 */
 
 				DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TSO split data "
-					"size is %d (%x:%x)\n", __FUNCTION__,
-					tx_data_bd->nbytes, tx_data_bd->addr_hi, tx_data_bd->addr_lo);
+				    "size is %d (%x:%x)\n", __FUNCTION__,
+				    tx_data_bd->nbytes, tx_data_bd->addr_hi,
+				    tx_data_bd->addr_lo);
 			}
 
 			/*
 			 * For TSO the controller needs the following info:
 			 * MSS, tcp_send_seq, ip_id, and tcp_pseudo_csum.
 			 */
-			pbd->lso_mss = htole16(m0->m_pkthdr.tso_segsz);
-			pbd->tcp_send_seq = ntohl(th->th_seq);
-			pbd->tcp_flags = th->th_flags;
-			pbd->ip_id = ntohs(ip->ip_id);
+			tx_parse_bd->lso_mss = htole16(m0->m_pkthdr.tso_segsz);
+			tx_parse_bd->tcp_send_seq = ntohl(th->th_seq);
+			tx_parse_bd->tcp_flags = th->th_flags;
+			tx_parse_bd->ip_id = ntohs(ip->ip_id);
+
+			tx_parse_bd->tcp_pseudo_csum =
+			    ntohs(in_pseudo(ip->ip_src.s_addr,
+			    ip->ip_dst.s_addr, htons(IPPROTO_TCP)));
 
-			pbd->tcp_pseudo_csum = ntohs(in_pseudo(ip->ip_src.s_addr,
-								   ip->ip_dst.s_addr, htons(IPPROTO_TCP)));
-
-			pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+			tx_parse_bd->global_data |=
+			    ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
 		}
 	}
 
@@ -8971,17 +8981,6 @@ bxe_tx_encap(struct bxe_fastpath *fp, st
 
 	/* Update bd producer index value for next tx */
 	bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
-/*
-BXE_PRINTF("tx_start_bd: addrlo:0x%x, addrhi:0x%x, nbytes:0x%x, bitfield:0x%x, gendata:0x%x, nbd:0x%x\n",
-tx_start_bd->addr_lo,
-tx_start_bd->addr_hi,
-tx_start_bd->nbytes,
-tx_start_bd->bd_flags.as_bitfield,
-tx_start_bd->general_data,
-tx_start_bd->nbd );
-
-bxe_dump_mbuf(sc, m0);
-*/
 	DBRUNMSG(BXE_EXTREME_SEND, bxe_dump_tx_chain(fp, debug_prod, nbds));
 
 	/*
@@ -9391,12 +9390,12 @@ bxe_ioctl(struct ifnet *ifp, u_long comm
  * 	 The adjusted value of *fp->rx_cons_sb.
  */
 static __inline uint16_t
-bxe_rx_comp_cons(struct bxe_fastpath *fp)
+bxe_rx_cq_cons(struct bxe_fastpath *fp)
 {
 	volatile uint16_t rx_cons_sb = 0;
 
 	rmb();
-	rx_cons_sb = (volatile uint16_t)le16toh(*fp->rx_cons_sb);
+	rx_cons_sb = (volatile uint16_t) le16toh(*fp->rx_cons_sb);
 
 	/*
 	 * It is valid for the hardware's copy of the completion
@@ -9405,8 +9404,8 @@ bxe_rx_comp_cons(struct bxe_fastpath *fp
 	 * that it is pointing at the next available CQE so we
 	 * need to adjust the value accordingly.
 	 */
-	if ((rx_cons_sb & TOTAL_RCQ_ENTRIES_PER_PAGE) ==
-		TOTAL_RCQ_ENTRIES_PER_PAGE)
+	if ((rx_cons_sb & USABLE_RCQ_ENTRIES_PER_PAGE) ==
+		USABLE_RCQ_ENTRIES_PER_PAGE)
 		rx_cons_sb++;
 
 	return (rx_cons_sb);
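
[Note: to illustrate the mask change above, recall that the slot at index
USABLE_RCQ_ENTRIES_PER_PAGE in each completion queue page holds a next-page
pointer rather than a real CQE (see the rx_cq_chain linking code later in this
diff), so the consumer index must be bumped past it. The constants below are
illustrative only -- the driver's real values are larger, but the usable count
is likewise assumed to be one less than a power of two. This small standalone
program shows how the old and new tests differ:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative values only; not the driver's real constants. */
	#define	TOTAL_RCQ_ENTRIES_PER_PAGE	8	/* incl. next-page slot */
	#define	USABLE_RCQ_ENTRIES_PER_PAGE	7	/* real CQEs per page */

	int
	main(void)
	{
		uint16_t idx;

		for (idx = 0; idx < 24; idx++) {
			/* Old test: fires whenever bit 3 of the index is set. */
			int old_adjust = ((idx & TOTAL_RCQ_ENTRIES_PER_PAGE) ==
			    TOTAL_RCQ_ENTRIES_PER_PAGE);
			/* New test: fires only at the reserved slots (7, 15, 23). */
			int new_adjust = ((idx & USABLE_RCQ_ENTRIES_PER_PAGE) ==
			    USABLE_RCQ_ENTRIES_PER_PAGE);

			printf("idx %2u: old %d new %d\n", idx, old_adjust,
			    new_adjust);
		}
		return (0);
	}

With the old mask the consumer would be adjusted for whole ranges of valid
indices (8 through 15 in this toy example); with the new mask it is adjusted
only when it lands on the next-page slot at the end of a page.]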
@@ -9434,7 +9433,7 @@ bxe_has_rx_work(struct bxe_fastpath *fp)
 {
 
 	rmb();
-	return (bxe_rx_comp_cons(fp) != fp->rx_comp_cons);
+	return (bxe_rx_cq_cons(fp) != fp->rx_cq_cons);
 }
 
 /*
@@ -10306,7 +10305,7 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 	struct eth_rx_bd *rx_bd;
 	struct eth_rx_cqe_next_page *nextpg;
 	uint16_t ring_prod, cqe_ring_prod;
-	int func, i, j, max_agg_queues;
+	int func, i, j, rcq_idx, rx_idx, rx_sge_idx, max_agg_queues;
 
 	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
 
@@ -10374,16 +10373,19 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 
 			/* Link the SGE Ring Pages to form SGE chain */
 			for (j = 0; j < NUM_RX_SGE_PAGES; j++) {
-				DBPRINT(sc,
-				    (BXE_INSANE_LOAD | BXE_INSANE_RESET),
-				    "%s(): Linking fp[%d] SGE ring[%d].\n",
-				    __FUNCTION__, i, j);
-
+				rx_sge_idx = ((j + 1) % NUM_RX_SGE_PAGES);
 				sge = &fp->rx_sge_chain[j][MAX_RX_SGE_CNT];
+
+				DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
+				    "%s(): fp[%02d].rx_sge_chain[%02d][0x%04X]=0x%jX\n",
+				     __FUNCTION__, i, j,
+				    (uint16_t) MAX_RX_SGE_CNT,
+				    (uintmax_t) fp->rx_sge_chain_paddr[rx_sge_idx]);
+
 				sge->addr_hi =
-				    htole32(U64_HI(fp->rx_sge_chain_paddr[(j + 1) %	NUM_RX_SGE_PAGES]));
+				    htole32(U64_HI(fp->rx_sge_chain_paddr[rx_sge_idx]));
 				sge->addr_lo =
-				    htole32(U64_LO(fp->rx_sge_chain_paddr[(j + 1) %	NUM_RX_SGE_PAGES]));
+				    htole32(U64_LO(fp->rx_sge_chain_paddr[rx_sge_idx]));
 			}
 
 			bxe_init_sge_ring_bit_mask(fp);
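
[Note: the loop above wires the SGE pages into a ring -- the last element of
page j (index MAX_RX_SGE_CNT) is loaded with the bus address of page
(j + 1) % NUM_RX_SGE_PAGES, so the final page points back to the first and the
hardware can walk the chain indefinitely. The RX BD and RX CQ chains below use
the same pattern. A minimal standalone sketch of the idea, with generic names
and made-up addresses rather than the driver's:

	#include <stdint.h>
	#include <stdio.h>

	#define	NUM_PAGES	4	/* stands in for NUM_RX_SGE_PAGES, etc. */

	struct next_page_link {
		uint32_t addr_hi;
		uint32_t addr_lo;
	};

	int
	main(void)
	{
		/* Made-up per-page bus addresses (kept in *_paddr[] in the driver). */
		uint64_t page_paddr[NUM_PAGES] = {
			0x10000, 0x20000, 0x30000, 0x40000
		};
		struct next_page_link link[NUM_PAGES];
		int j, next;

		for (j = 0; j < NUM_PAGES; j++) {
			next = (j + 1) % NUM_PAGES;	/* last page wraps to page 0 */
			link[j].addr_hi = (uint32_t)(page_paddr[next] >> 32);
			link[j].addr_lo = (uint32_t)(page_paddr[next] & 0xffffffffu);
			printf("page %d -> page %d (paddr 0x%jx)\n",
			    j, next, (uintmax_t)page_paddr[next]);
		}
		return (0);
	}
]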
@@ -10394,16 +10396,19 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 
 		/* Link the pages to form the RX BD Chain. */
 		for (j = 0; j < NUM_RX_PAGES; j++) {
+			rx_idx = ((j + 1) % NUM_RX_PAGES);
 			rx_bd = &fp->rx_bd_chain[j][USABLE_RX_BD_PER_PAGE];
 
-			DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
-			    "%s(): Linking fp[%d] RX BD chain page[%d].\n",
-			    __FUNCTION__, i, j);
+			DBPRINT(sc, (BXE_INFO_LOAD),
+			    "%s(): fp[%02d].rx_bd_chain[%02d][0x%04X]=0x%jX\n",
+			     __FUNCTION__, i, j,
+			    (uint16_t) USABLE_RX_BD_PER_PAGE,
+			    (uintmax_t) fp->rx_bd_chain_paddr[rx_idx]);
 
 			rx_bd->addr_hi =
-			    htole32(U64_HI(fp->rx_bd_chain_paddr[(j + 1) % NUM_RX_PAGES]));
+			    htole32(U64_HI(fp->rx_bd_chain_paddr[rx_idx]));
 			rx_bd->addr_lo =
-			    htole32(U64_LO(fp->rx_bd_chain_paddr[(j + 1) % NUM_RX_PAGES]));
+			    htole32(U64_LO(fp->rx_bd_chain_paddr[rx_idx]));
 		}
 
 		DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
@@ -10412,17 +10417,20 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 
 		/* Link the pages to form the RX Completion Queue.*/
 		for (j = 0; j < NUM_RCQ_PAGES; j++) {
+			rcq_idx = ((j + 1) % NUM_RCQ_PAGES);
 			nextpg = (struct eth_rx_cqe_next_page *)
-			    &fp->rx_comp_chain[j][USABLE_RCQ_ENTRIES_PER_PAGE];
+			    &fp->rx_cq_chain[j][USABLE_RCQ_ENTRIES_PER_PAGE];
 
-			DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
-	"%s(): Linking fp[%d] RX completion chain page[%d].\n",
-			     __FUNCTION__, i, j);
+			DBPRINT(sc, (BXE_INFO_LOAD),
+			    "%s(): fp[%02d].rx_cq_chain[%02d][0x%04X]=0x%jX\n",
+			     __FUNCTION__, i, j,
+			    (uint16_t) USABLE_RCQ_ENTRIES_PER_PAGE,
+			    (uintmax_t) fp->rx_cq_chain_paddr[rcq_idx]);
 
 			nextpg->addr_hi =
-			    htole32(U64_HI(fp->rx_comp_chain_paddr[(j + 1) % NUM_RCQ_PAGES]));
+			    htole32(U64_HI(fp->rx_cq_chain_paddr[rcq_idx]));
 			nextpg->addr_lo =
-			    htole32(U64_LO(fp->rx_comp_chain_paddr[(j + 1) % NUM_RCQ_PAGES]));
+			    htole32(U64_LO(fp->rx_cq_chain_paddr[rcq_idx]));
 		}
 
 		if (TPA_ENABLED(sc)) {
@@ -10432,7 +10440,8 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 			while (ring_prod < sc->rx_ring_size) {
 				if (bxe_alloc_rx_sge(sc, fp, ring_prod) != 0) {
 					BXE_PRINTF(
-	"%s(%d): Memory allocation failure! Disabling TPA for fp[%d].\n",
+					    "%s(%d): Memory allocation failure! "
+					    "Disabling TPA for fp[%d].\n",
 					    __FILE__, __LINE__, i);
 
 					/* Cleanup already allocated elements */
@@ -10452,7 +10461,7 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 		 * Allocate buffers for all the RX BDs in RX BD Chain.
 		 * Add completion queue entries at the same time.
 		 */
-		fp->rx_comp_cons = ring_prod = cqe_ring_prod = 0;
+		fp->rx_cq_cons = ring_prod = cqe_ring_prod = 0;
 		DBRUN(fp->free_rx_bd = USABLE_RX_BD);
 
 		while (ring_prod < sc->rx_ring_size) {
@@ -10469,9 +10478,9 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 		/* Update the driver's copy of the producer indices. */
 		fp->rx_bd_prod = ring_prod;
 
-		fp->rx_comp_prod = cqe_ring_prod;
+		fp->rx_cq_prod = cqe_ring_prod;
 		/*
-		 * fp->rx_comp_prod =
+		 * fp->rx_cq_prod =
 		 *     (uint16_t)min(NUM_RCQ_PAGES*TOTAL_RCQ_ENTRIES_PER_PAGE,
 		 *     cqe_ring_prod);
 		 */
@@ -10485,8 +10494,8 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 			    BUS_DMASYNC_PREWRITE);
 
 		for (j = 0; j < NUM_RCQ_PAGES; j++)
-			bus_dmamap_sync(fp->rx_comp_chain_tag,
-			    fp->rx_comp_chain_map[j], BUS_DMASYNC_PREREAD |
+			bus_dmamap_sync(fp->rx_cq_chain_tag,
+			    fp->rx_cq_chain_map[j], BUS_DMASYNC_PREREAD |
 			    BUS_DMASYNC_PREWRITE);
 
 		/*
@@ -10494,7 +10503,7 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 		 * Warning! this will generate an interrupt (to the TSTORM).
 		 * This must only be done when the controller is initialized.
 		 */
-		bxe_update_rx_prod(sc, fp, ring_prod, fp->rx_comp_prod,
+		bxe_update_rx_prod(sc, fp, ring_prod, fp->rx_cq_prod,
 		    fp->rx_sge_prod);
 
 		if (i != 0)
@@ -10502,10 +10511,10 @@ bxe_init_rx_chains(struct bxe_softc *sc)
 
 		REG_WR(sc, BAR_USTORM_INTMEM +
 		    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
-		    U64_LO(fp->rx_comp_chain_paddr[0]));
+		    U64_LO(fp->rx_cq_chain_paddr[0]));
 		REG_WR(sc, BAR_USTORM_INTMEM +
 		    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
-		    U64_HI(fp->rx_comp_chain_paddr[0]));
+		    U64_HI(fp->rx_cq_chain_paddr[0]));
 	}
 
 	DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
@@ -10521,7 +10530,7 @@ static void
 bxe_init_tx_chains(struct bxe_softc *sc)
 {
 	struct bxe_fastpath *fp;
-	struct eth_tx_next_bd *tx_bd;
+	struct eth_tx_next_bd *tx_n_bd;
 	int i, j;
 
 	DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
@@ -10532,17 +10541,17 @@ bxe_init_tx_chains(struct bxe_softc *sc)
 		    "%s(): Linking fp[%d] TX chain pages.\n", __FUNCTION__, i);
 
 		for (j = 0; j < NUM_TX_PAGES; j++) {
-			tx_bd =
+			tx_n_bd =
 			    &fp->tx_bd_chain[j][USABLE_TX_BD_PER_PAGE].next_bd;
 
 			DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
 			    "%s(): Linking fp[%d] TX BD chain page[%d].\n",
 			    __FUNCTION__, i, j);
 
-			tx_bd->addr_hi =
+			tx_n_bd->addr_hi =
 			    htole32(U64_HI(fp->tx_bd_chain_paddr[(j + 1) %
 			    NUM_TX_PAGES]));
-			tx_bd->addr_lo =
+			tx_n_bd->addr_lo =
 			    htole32(U64_LO(fp->tx_bd_chain_paddr[(j + 1) %
 			    NUM_TX_PAGES]));
 		}
@@ -10609,8 +10618,8 @@ bxe_free_rx_chains(struct bxe_softc *sc)
 
 			/* Clear each RX completion queue page. */
 			for (j = 0; j < NUM_RCQ_PAGES; j++) {
-				if (fp->rx_comp_chain[j] != NULL)
-					bzero((char *)fp->rx_comp_chain[j],
+				if (fp->rx_cq_chain[j] != NULL)
+					bzero((char *)fp->rx_cq_chain[j],
 					    BXE_RX_CHAIN_PAGE_SZ);
 			}
 
@@ -11190,14 +11199,14 @@ bxe_init_internal_func(struct bxe_softc 
 	for (i = 0; i < sc->num_queues; i++) {
 		fp = &sc->fp[i];
 		nextpg = (struct eth_rx_cqe_next_page *)
-		    &fp->rx_comp_chain[i][USABLE_RCQ_ENTRIES_PER_PAGE];
+		    &fp->rx_cq_chain[i][USABLE_RCQ_ENTRIES_PER_PAGE];
 		/* Program the completion queue address. */
 		REG_WR(sc, BAR_USTORM_INTMEM +
 		    USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
-		    U64_LO(fp->rx_comp_chain_paddr[0]));
+		    U64_LO(fp->rx_cq_chain_paddr[0]));
 		REG_WR(sc, BAR_USTORM_INTMEM +
 		    USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
-		    U64_HI(fp->rx_comp_chain_paddr[0]));
+		    U64_HI(fp->rx_cq_chain_paddr[0]));
 
 		/* Next page */
 		REG_WR(sc, BAR_USTORM_INTMEM + USTORM_CQE_PAGE_NEXT_OFFSET(port,
@@ -12806,22 +12815,22 @@ bxe_dma_free(struct bxe_softc *sc)
 				 * Free, unmap and destroy all RX CQ
 				 * chain pages.
 				 */
-				if (fp->rx_comp_chain_tag != NULL) {
+				if (fp->rx_cq_chain_tag != NULL) {
 					for (j = 0; j < NUM_RCQ_PAGES; j++ ) {
-						if (fp->rx_comp_chain_map[j] != NULL) {
-							if (fp->rx_comp_chain[j] != NULL)
-								bus_dmamem_free(fp->rx_comp_chain_tag,
-									fp->rx_comp_chain[j],
-									fp->rx_comp_chain_map[j]);
-
-							bus_dmamap_unload(fp->rx_comp_chain_tag,
-								fp->rx_comp_chain_map[j]);
-							bus_dmamap_destroy(fp->rx_comp_chain_tag,
-								fp->rx_comp_chain_map[j]);
+						if (fp->rx_cq_chain_map[j] != NULL) {
+							if (fp->rx_cq_chain[j] != NULL)
+								bus_dmamem_free(fp->rx_cq_chain_tag,
+									fp->rx_cq_chain[j],
+									fp->rx_cq_chain_map[j]);
+
+							bus_dmamap_unload(fp->rx_cq_chain_tag,
+								fp->rx_cq_chain_map[j]);
+							bus_dmamap_destroy(fp->rx_cq_chain_tag,
+								fp->rx_cq_chain_map[j]);
 						}
 					}
 
-					bus_dma_tag_destroy(fp->rx_comp_chain_tag);
+					bus_dma_tag_destroy(fp->rx_cq_chain_tag);
 				}
 
 				/* Unload and destroy the TX mbuf maps. */
@@ -13093,7 +13102,7 @@ bxe_dma_alloc(device_t dev)
 	    NULL,			/* filter f() */
 	    NULL,			/* filter f() arg */
 	    MAXBSIZE,			/* max map for this tag */
-	    BUS_SPACE_UNRESTRICTED,	/* #of discontinuities */
+	    BUS_SPACE_UNRESTRICTED,	/* # of discontinuities */
 	    BUS_SPACE_MAXSIZE_32BIT,	/* max seg size */
 	    0,				/* flags */
 	    NULL,			/* lock f() */
@@ -13115,9 +13124,9 @@ bxe_dma_alloc(device_t dev)
 		    (long unsigned int)sizeof(struct bxe_fastpath));
 
 		/*
-		 * Create a DMA tag for the status block, allocate and clear the
-		 * memory, map the memory into DMA space, and fetch the physical
-		 * address of the block.
+		 * Create a DMA tag for the status block, allocate and
+		 * clear the memory, map the memory into DMA space, and
+		 * fetch the physical address of the block.
 		 */
 
 		if (bus_dma_tag_create(sc->parent_tag,
@@ -13266,10 +13275,11 @@ bxe_dma_alloc(device_t dev)
 
 		/* Create DMA maps for each the TX mbuf cluster(ext buf). */
 		for (j = 0; j < TOTAL_TX_BD; j++) {
-			if (bus_dmamap_create(fp->tx_mbuf_tag, BUS_DMA_NOWAIT,
+			if (bus_dmamap_create(fp->tx_mbuf_tag,
+			    BUS_DMA_NOWAIT,
 			    &(fp->tx_mbuf_map[j]))) {
-				BXE_PRINTF(
-		"%s(%d): Unable to create fp[%d] TX mbuf DMA map!\n",
+				BXE_PRINTF("%s(%d): Unable to create fp[%d] "
+				    "TX mbuf DMA map!\n",
 				    __FILE__, __LINE__, i);
 				rc = ENOMEM;
 				goto bxe_dma_alloc_exit;
@@ -13363,8 +13373,8 @@ bxe_dma_alloc(device_t dev)
 
 		/* Create DMA maps for the RX mbuf clusters. */
 		for (j = 0; j < TOTAL_RX_BD; j++) {
-			if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT,
-			    &(fp->rx_mbuf_map[j]))) {
+			if (bus_dmamap_create(fp->rx_mbuf_tag,
+			    BUS_DMA_NOWAIT, &(fp->rx_mbuf_map[j]))) {
 				BXE_PRINTF(
 		"%s(%d): Unable to create fp[%d] RX mbuf DMA map!\n",
 				    __FILE__, __LINE__, i);
@@ -13392,7 +13402,7 @@ bxe_dma_alloc(device_t dev)
 		    0,			/* flags */
 		    NULL,		/* lock f() */
 		    NULL,		/* lock f() arg */
-		    &fp->rx_comp_chain_tag)) {
+		    &fp->rx_cq_chain_tag)) {
 			BXE_PRINTF(
 	"%s(%d): Could not allocate fp[%d] RX Completion Queue DMA tag!\n",
 			    __FILE__, __LINE__, i);
@@ -13401,9 +13411,9 @@ bxe_dma_alloc(device_t dev)
 		}
 
 		for (j = 0; j < NUM_RCQ_PAGES; j++) {
-			if (bus_dmamem_alloc(fp->rx_comp_chain_tag,
-			    (void **)&fp->rx_comp_chain[j], BUS_DMA_NOWAIT,
-			    &fp->rx_comp_chain_map[j])) {
+			if (bus_dmamem_alloc(fp->rx_cq_chain_tag,
+			    (void **)&fp->rx_cq_chain[j], BUS_DMA_NOWAIT,
+			    &fp->rx_cq_chain_map[j])) {
 				BXE_PRINTF(
 	"%s(%d): Could not allocate fp[%d] RX Completion Queue DMA memory!\n",
 				    __FILE__, __LINE__, i);
@@ -13411,11 +13421,11 @@ bxe_dma_alloc(device_t dev)
 				goto bxe_dma_alloc_exit;
 			}
 
-			bzero((char *)fp->rx_comp_chain[j],
+			bzero((char *)fp->rx_cq_chain[j],
 			    BXE_RX_CHAIN_PAGE_SZ);
 
-			error = bus_dmamap_load(fp->rx_comp_chain_tag,
-			    fp->rx_comp_chain_map[j], fp->rx_comp_chain[j],
+			error = bus_dmamap_load(fp->rx_cq_chain_tag,
+			    fp->rx_cq_chain_map[j], fp->rx_cq_chain[j],
 			    BXE_RX_CHAIN_PAGE_SZ, bxe_dma_map_addr, &busaddr,
 			    BUS_DMA_NOWAIT);
 
@@ -13431,17 +13441,17 @@ bxe_dma_alloc(device_t dev)
 			 * Physical address of each page in the RX
 			 * Completion Chain.
 			 */
-			fp->rx_comp_chain_paddr[j] = busaddr;
+			fp->rx_cq_chain_paddr[j] = busaddr;
 
 			DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
-			    "%s(): fp[%d]->rx_comp_chain_paddr[%d] = 0x%jX\n",
+			    "%s(): fp[%d]->rx_cq_chain_paddr[%d] = 0x%jX\n",
 			    __FUNCTION__, i, j, (uintmax_t)busaddr);
 		}
 
 		if (TPA_ENABLED(sc)) {
 			int tpa_pool_max = CHIP_IS_E1H(sc) ?
-				ETH_MAX_AGGREGATION_QUEUES_E1H :
-				ETH_MAX_AGGREGATION_QUEUES_E1;
+			    ETH_MAX_AGGREGATION_QUEUES_E1H :
+			    ETH_MAX_AGGREGATION_QUEUES_E1;
 
 			/*
 			 * Create a DMA tag for the RX SGE Ring,
@@ -14213,6 +14223,7 @@ bxe_ifmedia_upd(struct ifnet *ifp)
 
 	ifm = &sc->bxe_ifmedia;
 	rc = 0;
+
 	/* This is an Ethernet controller. */
 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
 		rc = EINVAL;
@@ -14672,27 +14683,27 @@ bxe_tpa_stop_exit:
  */
 static __inline void
 bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
-    uint16_t bd_prod, uint16_t rx_comp_prod, uint16_t rx_sge_prod)
+    uint16_t bd_prod, uint16_t rx_cq_prod, uint16_t sge_prod)
 {
 	volatile struct ustorm_eth_rx_producers rx_prods = {0};
 	int i;
 
 	/* Update producers. */
-	rx_prods.bd_prod =  bd_prod;/* htole16(bd_prod);*/
-	rx_prods.cqe_prod = rx_comp_prod;/*htole16(rx_comp_prod);*/
-	rx_prods.sge_prod = rx_sge_prod;/*htole16(rx_sge_prod);*/
+	rx_prods.bd_prod =  bd_prod;
+	rx_prods.cqe_prod = rx_cq_prod;
+	rx_prods.sge_prod = sge_prod;
 
 	wmb();
 
 	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++){
 		REG_WR(sc, BAR_USTORM_INTMEM +
 		    USTORM_RX_PRODS_OFFSET(BP_PORT(sc), fp->cl_id) + i * 4,
-		    ((volatile uint32_t *)&rx_prods)[i]);
+		    ((volatile uint32_t *) &rx_prods)[i]);
 	}
 
-	DBPRINT(sc, BXE_EXTREME_RECV, "%s(): Wrote fp[%d] bd_prod = 0x%04X, "
-	    "cqe_prod = 0x%04X, sge_prod = 0x%04X\n", __FUNCTION__, fp->index,
-	    bd_prod, rx_comp_prod, rx_sge_prod);
+	DBPRINT(sc, BXE_EXTREME_RECV, "%s(%d): Wrote fp[%02d] bd_prod = 0x%04X, "
+	    "rx_cq_prod = 0x%04X, sge_prod = 0x%04X\n", __FUNCTION__, curcpu,
+	    fp->index, bd_prod, rx_cq_prod, sge_prod);
 }
 
 /*
@@ -14706,32 +14717,38 @@ bxe_rxeof(struct bxe_fastpath *fp)
 {
 	struct bxe_softc *sc;
 	struct ifnet *ifp;
-	uint16_t bd_cons, bd_prod, bd_prod_fw;
-	uint16_t hw_comp_cons_idx, sw_comp_cons_idx, sw_comp_prod;
-	uint16_t comp_ring_cons;
+	uint16_t rx_bd_cons, rx_bd_cons_idx;
+	uint16_t rx_bd_prod, rx_bd_prod_idx;
+	uint16_t rx_cq_cons, rx_cq_cons_idx;
+	uint16_t rx_cq_prod, rx_cq_cons_sb;
 
 #ifdef BXE_DEBUG
 	unsigned long rx_pkts = 0;
 #endif
+
 	sc = fp->sc;
 	ifp = sc->bxe_ifp;
 
 	DBENTER(BXE_EXTREME_RECV);
-	/* Get the sb's view of the RX completion consumer index. */
-	hw_comp_cons_idx = le16toh(*fp->rx_cons_sb);
-	if ((hw_comp_cons_idx & USABLE_RCQ_ENTRIES_PER_PAGE) ==
-	    USABLE_RCQ_ENTRIES_PER_PAGE)
-		hw_comp_cons_idx++;
-	/* Get working copies of the driver's view of the RX indices. */
-	bd_cons = fp->rx_bd_cons;
-	bd_prod = bd_prod_fw = fp->rx_bd_prod;
-	sw_comp_cons_idx = fp->rx_comp_cons;
-	sw_comp_prod = fp->rx_comp_prod;
+
+	/* Get the status block's view of the RX completion consumer index. */
+	rx_cq_cons_sb = bxe_rx_cq_cons(fp);
+
+	/*
+	 * Get working copies of the driver's view of the
+	 * RX indices. These are 16 bit values that are
+	 * expected to increment from from 0 to	65535
+	 * and then wrap-around to 0 again.
+	 */
+	rx_bd_cons = fp->rx_bd_cons;
+	rx_bd_prod = fp->rx_bd_prod;
+	rx_cq_cons = fp->rx_cq_cons;
+	rx_cq_prod = fp->rx_cq_prod;
 
 	DBPRINT(sc, (BXE_EXTREME_RECV),
-	    "%s(): fp[%d], bd_cons = 0x%04X, bd_prod = 0x%04X, "
-	    "sw_comp_cons = 0x%04X, sw_comp_prod = 0x%04X\n", __FUNCTION__,
-	    fp->index, bd_cons, bd_prod, sw_comp_cons_idx, sw_comp_prod);
+	    "%s(%d): BEFORE: fp[%d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
+	    "rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
+	    curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
 
 	/*
 	 * Memory barrier to prevent speculative reads of the RX buffer
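
[Note: the comment above about the 16-bit working copies describes the
descriptor-ring idiom the rest of bxe_rxeof() relies on: the producer and
consumer counters run freely over the whole 0..65535 range and wrap naturally
as uint16_t values, and are only folded down to a page number and an offset
(via RX_BD(), RCQ_ENTRY(), RCQ_PAGE() and RCQ_IDX() in the code that follows)
at the moment the driver indexes into the chain arrays. A standalone
illustration with made-up ring sizes, which ignores the reserved next-page
slots that the real macros skip:

	#include <stdint.h>
	#include <stdio.h>

	/* Made-up sizes, chosen so the ring size divides 65536 evenly. */
	#define	ENTRIES_PER_PAGE	256
	#define	NUM_PAGES		4
	#define	TOTAL_ENTRIES		(ENTRIES_PER_PAGE * NUM_PAGES)

	int
	main(void)
	{
		uint16_t cons = 65533;	/* free-running counter, about to wrap */
		int i;

		for (i = 0; i < 6; i++, cons++) {
			/* Fold the free-running value into ring coordinates. */
			uint16_t entry = cons % TOTAL_ENTRIES;
			uint16_t page = entry / ENTRIES_PER_PAGE;
			uint16_t idx = entry % ENTRIES_PER_PAGE;

			printf("cons %5u -> entry %4u (page %u, idx %3u)\n",
			    cons, entry, page, idx);
		}
		return (0);
	}

Because 65536 is a multiple of the ring size here, the page/index sequence
stays continuous across the 65535 -> 0 wrap, which is why the counters can be
allowed to overflow freely.]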
@@ -14743,20 +14760,25 @@ bxe_rxeof(struct bxe_fastpath *fp)
 	 * Scan through the receive chain as long
 	 * as there is work to do.
 	 */
-	while (sw_comp_cons_idx != hw_comp_cons_idx) {
+	while (rx_cq_cons != rx_cq_cons_sb) {
 		struct mbuf *m;
 		union eth_rx_cqe *cqe;
 		uint8_t cqe_fp_flags;
 		uint16_t len, pad;
 
-		/* Convert the indices to an actual rx_bd index. */
-		comp_ring_cons = RCQ_ENTRY(sw_comp_cons_idx);
-		bd_prod = RX_BD(bd_prod);
-		bd_cons = RX_BD(bd_cons);
+		/*
+		 * Convert the 16 bit indices used by hardware
+		 * into values that map to the arrays used by
+		 * the driver (i.e. an index).
+		 */
+		rx_cq_cons_idx   = RCQ_ENTRY(rx_cq_cons);
+		rx_bd_prod_idx = RX_BD(rx_bd_prod);
+		rx_bd_cons_idx = RX_BD(rx_bd_cons);
 		wmb();
+
 		/* Fetch the cookie. */
-		cqe = (union eth_rx_cqe*)
-		    &fp->rx_comp_chain[RCQ_PAGE(comp_ring_cons)][RCQ_IDX(comp_ring_cons)];
+		cqe = (union eth_rx_cqe *)
+		    &fp->rx_cq_chain[RCQ_PAGE(rx_cq_cons_idx)][RCQ_IDX(rx_cq_cons_idx)];
 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
 
 #ifdef BXE_DEBUG
@@ -14769,9 +14791,10 @@ bxe_rxeof(struct bxe_fastpath *fp)
 		}
 #endif
 
-		DBRUNIF((cqe_fp_flags == 0), BXE_PRINTF(
-		    "%s(): CQE received with null type/error flags!\n",
-		    __FUNCTION__));
+		DBRUNIF((cqe_fp_flags == 0),
+		    BXE_PRINTF("%s(): CQE received with null "
+		    "type/error flags!\n", __FUNCTION__);
+		    bxe_dump_cqe(fp, rx_cq_cons_idx, cqe));
 
 		/* Check the CQE type for slowpath or fastpath completion. */
 		if (__predict_false(CQE_TYPE(cqe_fp_flags) ==
@@ -14803,8 +14826,8 @@ bxe_rxeof(struct bxe_fastpath *fp)
 				 * Check if a TPA aggregation has been started.
 				 */
 				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
-					bxe_tpa_start(fp, queue, bd_cons,
-					    bd_prod);
+					bxe_tpa_start(fp, queue,
+					    rx_bd_cons_idx, rx_bd_prod_idx);
 					goto bxe_rxeof_next_rx;
 				}
 
@@ -14826,7 +14849,7 @@ bxe_rxeof(struct bxe_fastpath *fp)
 					 * the frame up.
 					 */
 					bxe_tpa_stop(sc, fp, queue, pad, len,
-					    cqe, comp_ring_cons);
+					    cqe, rx_cq_cons_idx);
 					bxe_update_sge_prod(fp,
 					    &cqe->fast_path_cqe);
 					goto bxe_rxeof_next_cqe;
@@ -14834,8 +14857,8 @@ bxe_rxeof(struct bxe_fastpath *fp)
 			}
 
 			/* Remove the mbuf from the RX chain. */
-			m = fp->rx_mbuf_ptr[bd_cons];
-			fp->rx_mbuf_ptr[bd_cons] = NULL;
+			m = fp->rx_mbuf_ptr[rx_bd_cons_idx];
+			fp->rx_mbuf_ptr[rx_bd_cons_idx] = NULL;
 
 			DBRUN(fp->free_rx_bd++);
 			DBRUNIF((fp->free_rx_bd > USABLE_RX_BD),
@@ -14845,9 +14868,10 @@ bxe_rxeof(struct bxe_fastpath *fp)
 
 			/* Unmap the mbuf from DMA space. */
 			bus_dmamap_sync(fp->rx_mbuf_tag,
-			    fp->rx_mbuf_map[bd_cons], BUS_DMASYNC_POSTREAD);
+			    fp->rx_mbuf_map[rx_bd_cons_idx],
+			    BUS_DMASYNC_POSTREAD);
 			bus_dmamap_unload(fp->rx_mbuf_tag,
-			    fp->rx_mbuf_map[bd_cons]);
+			    fp->rx_mbuf_map[rx_bd_cons_idx]);
 
 			/* Check if the received frame has any errors. */
 			if (__predict_false(cqe_fp_flags &
@@ -14860,11 +14884,13 @@ bxe_rxeof(struct bxe_fastpath *fp)
 				fp->soft_rx_errors++;
 
 				/* Reuse the mbuf for a new frame. */
-				if (bxe_get_buf(fp, m, bd_prod)) {
+				if (bxe_get_buf(fp, m, rx_bd_prod_idx)) {
 					DBPRINT(sc, BXE_FATAL,
 					    "%s(): Can't reuse RX mbuf!\n",
 					    __FUNCTION__);
 					DBRUN(bxe_breakpoint(sc));
+
+					/* ToDo: Find alterntive to panic(). */
 					panic("bxe%d: Can't reuse RX mbuf!\n",
 					    sc->bxe_unit);
 				}
@@ -14885,7 +14911,8 @@ bxe_rxeof(struct bxe_fastpath *fp)
 			 */
 
 			/* Allocate a new mbuf for the receive chain. */
-			if (__predict_false(bxe_get_buf(fp, NULL, bd_prod))) {
+			if (__predict_false(bxe_get_buf(fp,
+			    NULL, rx_bd_prod_idx))) {
 				/*
 				 * Drop the current frame if we can't get
 				 * a new mbuf.
@@ -14897,12 +14924,11 @@ bxe_rxeof(struct bxe_fastpath *fp)
 				 * receive chain.
 				 */
 				if (__predict_false(bxe_get_buf(fp, m,
-				    bd_prod))) {
+				    rx_bd_prod_idx))) {
 					/* This is really bad! */
 					DBPRINT(sc, BXE_FATAL,
 					    "%s(): Can't reuse RX mbuf!\n",
 					    __FUNCTION__);
-
 					DBRUN(bxe_breakpoint(sc));
 
 					/* ToDo: Find alterntive to panic(). */
@@ -14980,12 +15006,12 @@ bxe_rxeof(struct bxe_fastpath *fp)
 			}
 
 			/* Last chance to check for problems. */
-			DBRUN(bxe_validate_rx_packet(fp, comp_ring_cons, cqe,
-			    m));
+			DBRUN(bxe_validate_rx_packet(fp, rx_cq_cons, cqe, m));
 
 			/* Pass the mbuf off to the upper layers. */
 			ifp->if_ipackets++;
 
+			/* ToDo: Any potential locking issues here? */
 			/* Pass the frame to the stack. */
 			(*ifp->if_input)(ifp, m);
 
@@ -14993,14 +15019,13 @@ bxe_rxeof(struct bxe_fastpath *fp)
 		}
 
 bxe_rxeof_next_rx:
-		bd_prod = NEXT_RX_BD(bd_prod);
-		bd_prod_fw = NEXT_RX_BD(bd_prod_fw);
-		bd_cons = NEXT_RX_BD(bd_cons);
+		rx_bd_prod = NEXT_RX_BD(rx_bd_prod);
+		rx_bd_cons = NEXT_RX_BD(rx_bd_cons);
 		DBRUN(rx_pkts++);
 
 bxe_rxeof_next_cqe:
-		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
-		sw_comp_cons_idx = NEXT_RCQ_IDX(sw_comp_cons_idx);
+		rx_cq_prod = NEXT_RCQ_IDX(rx_cq_prod);
+		rx_cq_cons = NEXT_RCQ_IDX(rx_cq_cons);
 
 		/*
 		 * Memory barrier to prevent speculative reads of the RX buffer
@@ -15010,18 +15035,19 @@ bxe_rxeof_next_cqe:
 	}
 
 	/* Update the driver copy of the fastpath indices. */
-	fp->rx_bd_cons = bd_cons;
-	fp->rx_bd_prod = bd_prod_fw;
-	fp->rx_comp_cons = sw_comp_cons_idx;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


