Skip site navigation (1)Skip section navigation (2)
Date:      Mon, 13 Jun 2016 07:30:55 +0000 (UTC)
From:      Sepherosa Ziehau <sephe@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r301861 - stable/10/sys/dev/hyperv/netvsc
Message-ID:  <201606130730.u5D7Utc6086560@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: sephe
Date: Mon Jun 13 07:30:54 2016
New Revision: 301861
URL: https://svnweb.freebsd.org/changeset/base/301861

Log:
  MFC 295743,295744,295745,295746,295747
  
  295743
      hyperv/hn: Change global tunable prefix to hw.hn
  
      And use SYSCTL+CTLFLAG_RDTUN for them.
  
      Suggested by:       adrian
      Reviewed by:        adrian, Hongjiang Zhang <honzhan microsoft com>
      Approved by:        adrian (mentor)
      MFC after:  1 week
      Sponsored by:       Microsoft OSTC
      Differential Revision:      https://reviews.freebsd.org/D5274
  
  295744
      hyperv/hn: Split RX ring data structure out of softc
  
      This paves the way for the upcoming vRSS work and eases further code cleanup.
  
      Reviewed by:        adrian
      Approved by:        adrian (mentor)
      MFC after:  1 week
      Sponsored by:       Microsoft OSTC
      Differential Revision:      https://reviews.freebsd.org/D5275
  
  295745
      hyperv/hn: Use taskqueue_enqueue()
  
      This also eases experimenting with the non-fast taskqueue.
  
      Reviewed by:        adrian, Jun Su <junsu microsoft com>
      Approved by:        adrian (mentor)
      MFC after:  1 week
      Sponsored by:       Microsoft OSTC
      Differential Revision:      https://reviews.freebsd.org/D5276
  
  295746
      hyperv/hn: Use non-fast taskqueue for transmission
  
      Performance stays the same; so there is no need to use a fast taskqueue here.
  
  295747
      hyperv/hn: Split TX ring data structure out of softc
  
      This paves the way for the upcoming vRSS work and eases further code cleanup.
  
      Reviewed by:        adrian
      Approved by:        adrian (mentor)
      MFC after:  1 week
      Sponsored by:       Microsoft OSTC
      Differential Revision:      https://reviews.freebsd.org/D5283

Modified:
  stable/10/sys/dev/hyperv/netvsc/hv_net_vsc.h
  stable/10/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/hyperv/netvsc/hv_net_vsc.h
==============================================================================
--- stable/10/sys/dev/hyperv/netvsc/hv_net_vsc.h	Mon Jun 13 07:03:00 2016	(r301860)
+++ stable/10/sys/dev/hyperv/netvsc/hv_net_vsc.h	Mon Jun 13 07:30:54 2016	(r301861)
@@ -993,31 +993,25 @@ typedef struct {
 struct hn_txdesc;
 SLIST_HEAD(hn_txdesc_list, hn_txdesc);
 
-/*
- * Device-specific softc structure
- */
-typedef struct hn_softc {
-	struct ifnet    *hn_ifp;
-	struct arpcom   arpcom;
-	struct ifmedia	hn_media;
-	device_t        hn_dev;
-	uint8_t         hn_unit;
-	int             hn_carrier;
-	int             hn_if_flags;
-	struct mtx      hn_lock;
-	int             hn_initdone;
-	/* See hv_netvsc_drv_freebsd.c for rules on how to use */
-	int             temp_unusable;
-	struct hv_device  *hn_dev_obj;
-	netvsc_dev  	*net_dev;
+struct hn_rx_ring {
+	struct lro_ctrl	hn_lro;
 
-	struct hn_txdesc *hn_txdesc;
-	bus_dma_tag_t	hn_tx_data_dtag;
-	bus_dma_tag_t	hn_tx_rndis_dtag;
-	int		hn_tx_chimney_size;
-	int		hn_tx_chimney_max;
-	uint64_t	hn_csum_assist;
+	/* Trust csum verification on host side */
+	int		hn_trust_hcsum;	/* HN_TRUST_HCSUM_ */
+
+	u_long		hn_csum_ip;
+	u_long		hn_csum_tcp;
+	u_long		hn_csum_udp;
+	u_long		hn_csum_trusted;
+	u_long		hn_lro_tried;
+	u_long		hn_small_pkts;
+} __aligned(CACHE_LINE_SIZE);
+
+#define HN_TRUST_HCSUM_IP	0x0001
+#define HN_TRUST_HCSUM_TCP	0x0002
+#define HN_TRUST_HCSUM_UDP	0x0004
 
+struct hn_tx_ring {
 	struct mtx	hn_txlist_spin;
 	struct hn_txdesc_list hn_txlist;
 	int		hn_txdesc_cnt;
@@ -1025,32 +1019,57 @@ typedef struct hn_softc {
 	int		hn_txeof;
 
 	int		hn_sched_tx;
-	int		hn_direct_tx_size;
 	struct taskqueue *hn_tx_taskq;
 	struct task	hn_start_task;
 	struct task	hn_txeof_task;
 
-	struct lro_ctrl	hn_lro;
+	struct mtx	hn_tx_lock;
+	struct hn_softc	*hn_sc;
 
-	/* Trust csum verification on host side */
-	int		hn_trust_hcsum;	/* HN_TRUST_HCSUM_ */
+	int		hn_direct_tx_size;
+	int		hn_tx_chimney_size;
+	bus_dma_tag_t	hn_tx_data_dtag;
+	uint64_t	hn_csum_assist;
 
-	u_long		hn_csum_ip;
-	u_long		hn_csum_tcp;
-	u_long		hn_csum_udp;
-	u_long		hn_csum_trusted;
-	u_long		hn_lro_tried;
-	u_long		hn_small_pkts;
 	u_long		hn_no_txdescs;
 	u_long		hn_send_failed;
 	u_long		hn_txdma_failed;
 	u_long		hn_tx_collapsed;
 	u_long		hn_tx_chimney;
-} hn_softc_t;
 
-#define HN_TRUST_HCSUM_IP	0x0001
-#define HN_TRUST_HCSUM_TCP	0x0002
-#define HN_TRUST_HCSUM_UDP	0x0004
+	/* Rarely used stuffs */
+	struct hn_txdesc *hn_txdesc;
+	bus_dma_tag_t	hn_tx_rndis_dtag;
+	struct sysctl_oid *hn_tx_sysctl_tree;
+} __aligned(CACHE_LINE_SIZE);
+
+/*
+ * Device-specific softc structure
+ */
+typedef struct hn_softc {
+	struct ifnet    *hn_ifp;
+	struct arpcom   arpcom;
+	struct ifmedia	hn_media;
+	device_t        hn_dev;
+	uint8_t         hn_unit;
+	int             hn_carrier;
+	int             hn_if_flags;
+	struct mtx      hn_lock;
+	int             hn_initdone;
+	/* See hv_netvsc_drv_freebsd.c for rules on how to use */
+	int             temp_unusable;
+	struct hv_device  *hn_dev_obj;
+	netvsc_dev  	*net_dev;
+
+	int		hn_rx_ring_cnt;
+	struct hn_rx_ring *hn_rx_ring;
+
+	int		hn_tx_ring_cnt;
+	struct hn_tx_ring *hn_tx_ring;
+	int		hn_tx_chimney_max;
+	struct taskqueue *hn_tx_taskq;
+	struct sysctl_oid *hn_tx_sysctl_tree;
+} hn_softc_t;
 
 /*
  * Externs

Modified: stable/10/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
==============================================================================
--- stable/10/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c	Mon Jun 13 07:03:00 2016	(r301860)
+++ stable/10/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c	Mon Jun 13 07:30:54 2016	(r301861)
@@ -153,7 +153,7 @@ __FBSDID("$FreeBSD$");
 struct hn_txdesc {
 	SLIST_ENTRY(hn_txdesc) link;
 	struct mbuf	*m;
-	struct hn_softc	*sc;
+	struct hn_tx_ring *txr;
 	int		refs;
 	uint32_t	flags;		/* HN_TXD_FLAG_ */
 	netvsc_packet	netvsc_pkt;	/* XXX to be removed */
@@ -193,7 +193,6 @@ struct hn_txdesc {
 #define NV_LOCK_INIT(_sc, _name) \
 	    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
 #define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
-#define NV_TRYLOCK(_sc)		mtx_trylock(&(_sc)->hn_lock)
 #define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
 #define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
 #define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
@@ -205,41 +204,57 @@ struct hn_txdesc {
 
 int hv_promisc_mode = 0;    /* normal mode by default */
 
+SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface");
+
 /* Trust tcp segements verification on host side. */
 static int hn_trust_hosttcp = 1;
-TUNABLE_INT("dev.hn.trust_hosttcp", &hn_trust_hosttcp);
+SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
+    &hn_trust_hosttcp, 0,
+    "Trust tcp segement verification on host side, "
+    "when csum info is missing (global setting)");
 
 /* Trust udp datagrams verification on host side. */
 static int hn_trust_hostudp = 1;
-TUNABLE_INT("dev.hn.trust_hostudp", &hn_trust_hostudp);
+SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
+    &hn_trust_hostudp, 0,
+    "Trust udp datagram verification on host side, "
+    "when csum info is missing (global setting)");
 
 /* Trust ip packets verification on host side. */
 static int hn_trust_hostip = 1;
-TUNABLE_INT("dev.hn.trust_hostip", &hn_trust_hostip);
+SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
+    &hn_trust_hostip, 0,
+    "Trust ip packet verification on host side, "
+    "when csum info is missing (global setting)");
 
 #if __FreeBSD_version >= 1100045
 /* Limit TSO burst size */
 static int hn_tso_maxlen = 0;
-TUNABLE_INT("dev.hn.tso_maxlen", &hn_tso_maxlen);
+SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
+    &hn_tso_maxlen, 0, "TSO burst limit");
 #endif
 
 /* Limit chimney send size */
 static int hn_tx_chimney_size = 0;
-TUNABLE_INT("dev.hn.tx_chimney_size", &hn_tx_chimney_size);
+SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
+    &hn_tx_chimney_size, 0, "Chimney send packet size limit");
 
 /* Limit the size of packet for direct transmission */
 static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
-TUNABLE_INT("dev.hn.direct_tx_size", &hn_direct_tx_size);
+SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
+    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");
 
 #if defined(INET) || defined(INET6)
 #if __FreeBSD_version >= 1100095
 static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
-TUNABLE_INT("dev.hn.lro_entry_count", &hn_lro_entry_count);
+SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
+    &hn_lro_entry_count, 0, "LRO entry count");
 #endif
 #endif
 
 static int hn_share_tx_taskq = 0;
-TUNABLE_INT("hw.hn.share_tx_taskq", &hn_share_tx_taskq);
+SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
+    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");
 
 static struct taskqueue	*hn_tx_taskq;
 
@@ -250,9 +265,9 @@ static void hn_stop(hn_softc_t *sc);
 static void hn_ifinit_locked(hn_softc_t *sc);
 static void hn_ifinit(void *xsc);
 static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
-static int hn_start_locked(struct ifnet *ifp, int len);
+static int hn_start_locked(struct hn_tx_ring *txr, int len);
 static void hn_start(struct ifnet *ifp);
-static void hn_start_txeof(struct ifnet *ifp);
+static void hn_start_txeof(struct hn_tx_ring *);
 static int hn_ifmedia_upd(struct ifnet *ifp);
 static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
 #if __FreeBSD_version >= 1100099
@@ -261,12 +276,22 @@ static int hn_lro_ackcnt_sysctl(SYSCTL_H
 #endif
 static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
 static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
+static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
+static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
+static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
+static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
 static int hn_check_iplen(const struct mbuf *, int);
-static int hn_create_tx_ring(struct hn_softc *sc);
-static void hn_destroy_tx_ring(struct hn_softc *sc);
+static int hn_create_tx_ring(struct hn_softc *, int);
+static void hn_destroy_tx_ring(struct hn_tx_ring *);
+static int hn_create_tx_data(struct hn_softc *);
+static void hn_destroy_tx_data(struct hn_softc *);
 static void hn_start_taskfunc(void *xsc, int pending);
 static void hn_txeof_taskfunc(void *xsc, int pending);
-static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **);
+static void hn_stop_tx_tasks(struct hn_softc *);
+static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
+static void hn_create_rx_data(struct hn_softc *sc);
+static void hn_destroy_rx_data(struct hn_softc *sc);
+static void hn_set_tx_chimney_size(struct hn_softc *, int);
 
 static int
 hn_ifmedia_upd(struct ifnet *ifp __unused)
@@ -332,17 +357,10 @@ netvsc_attach(device_t dev)
 	hn_softc_t *sc;
 	int unit = device_get_unit(dev);
 	struct ifnet *ifp = NULL;
-	struct sysctl_oid_list *child;
-	struct sysctl_ctx_list *ctx;
 	int error;
 #if __FreeBSD_version >= 1100045
 	int tso_maxlen;
 #endif
-#if defined(INET) || defined(INET6)
-#if __FreeBSD_version >= 1100095
-	int lroent_cnt;
-#endif
-#endif
 
 	sc = device_get_softc(dev);
 	if (sc == NULL) {
@@ -352,29 +370,15 @@ netvsc_attach(device_t dev)
 	bzero(sc, sizeof(hn_softc_t));
 	sc->hn_unit = unit;
 	sc->hn_dev = dev;
-	sc->hn_direct_tx_size = hn_direct_tx_size;
-	if (hn_trust_hosttcp)
-		sc->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
-	if (hn_trust_hostudp)
-		sc->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
-	if (hn_trust_hostip)
-		sc->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
 
 	if (hn_tx_taskq == NULL) {
-		sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK,
+		sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
 		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
 		taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
 		    device_get_nameunit(dev));
 	} else {
 		sc->hn_tx_taskq = hn_tx_taskq;
 	}
-	TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc);
-	TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc);
-
-	error = hn_create_tx_ring(sc);
-	if (error)
-		goto failed;
-
 	NV_LOCK_INIT(sc, "NetVSCLock");
 
 	sc->hn_dev_obj = device_ctx;
@@ -382,6 +386,12 @@ netvsc_attach(device_t dev)
 	ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
 	ifp->if_softc = sc;
 
+	error = hn_create_tx_data(sc);
+	if (error)
+		goto failed;
+
+	hn_create_rx_data(sc);
+
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 	ifp->if_dunit = unit;
 	ifp->if_dname = NETVSC_DEVNAME;
@@ -412,12 +422,7 @@ netvsc_attach(device_t dev)
 	ifp->if_capenable |=
 	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
 	    IFCAP_LRO;
-
-	if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
-		sc->hn_csum_assist = HN_CSUM_ASSIST;
-	else
-		sc->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
-	ifp->if_hwassist = sc->hn_csum_assist | CSUM_TSO;
+	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
 
 	error = hv_rf_on_device_add(device_ctx, &device_info);
 	if (error)
@@ -427,24 +432,6 @@ netvsc_attach(device_t dev)
 		sc->hn_carrier = 1;
 	}
 
-#if defined(INET) || defined(INET6)
-#if __FreeBSD_version >= 1100095
-	lroent_cnt = hn_lro_entry_count;
-	if (lroent_cnt < TCP_LRO_ENTRIES)
-		lroent_cnt = TCP_LRO_ENTRIES;
-	tcp_lro_init_args(&sc->hn_lro, ifp, lroent_cnt, 0);
-	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
-#else
-	tcp_lro_init(&sc->hn_lro);
-	/* Driver private LRO settings */
-	sc->hn_lro.ifp = ifp;
-#endif
-#if __FreeBSD_version >= 1100099
-	sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
-	sc->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
-#endif
-#endif	/* INET || INET6 */
-
 #if __FreeBSD_version >= 1100045
 	tso_maxlen = hn_tso_maxlen;
 	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
@@ -464,134 +451,14 @@ netvsc_attach(device_t dev)
 #endif
 
 	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
-	sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
+	hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
 	if (hn_tx_chimney_size > 0 &&
 	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
-		sc->hn_tx_chimney_size = hn_tx_chimney_size;
-
-	/*
-	 * Always schedule transmission instead of trying
-	 * to do direct transmission.  This one gives the
-	 * best performance so far.
-	 */
-	sc->hn_sched_tx = 1;
-
-	ctx = device_get_sysctl_ctx(dev);
-	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
-
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_queued",
-	    CTLFLAG_RW, &sc->hn_lro.lro_queued, 0, "LRO queued");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_flushed",
-	    CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried",
-	    CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries");
-#if __FreeBSD_version >= 1100099
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
-	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
-	    "Max # of data bytes to be aggregated by LRO");
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
-	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
-	    "Max # of ACKs to be aggregated by LRO");
-#endif
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
-	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
-	    hn_trust_hcsum_sysctl, "I",
-	    "Trust tcp segement verification on host side, "
-	    "when csum info is missing");
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
-	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
-	    hn_trust_hcsum_sysctl, "I",
-	    "Trust udp datagram verification on host side, "
-	    "when csum info is missing");
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
-	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
-	    hn_trust_hcsum_sysctl, "I",
-	    "Trust ip packet verification on host side, "
-	    "when csum info is missing");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_ip",
-	    CTLFLAG_RW, &sc->hn_csum_ip, "RXCSUM IP");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_tcp",
-	    CTLFLAG_RW, &sc->hn_csum_tcp, "RXCSUM TCP");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_udp",
-	    CTLFLAG_RW, &sc->hn_csum_udp, "RXCSUM UDP");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_trusted",
-	    CTLFLAG_RW, &sc->hn_csum_trusted,
-	    "# of packets that we trust host's csum verification");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "small_pkts",
-	    CTLFLAG_RW, &sc->hn_small_pkts, "# of small packets received");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs",
-	    CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed",
-	    CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v sending failure");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed",
-	    CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failure");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed",
-	    CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapsed");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_chimney",
-	    CTLFLAG_RW, &sc->hn_tx_chimney, "# of chimney send");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
-	    CTLFLAG_RD, &sc->hn_txdesc_cnt, 0, "# of total TX descs");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
-	    CTLFLAG_RD, &sc->hn_txdesc_avail, 0, "# of available TX descs");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
-	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
-	    "Chimney send packet size upper boundary");
-	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
-	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
-	    "I", "Chimney send packet size limit");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "direct_tx_size",
-	    CTLFLAG_RW, &sc->hn_direct_tx_size, 0,
-	    "Size of the packet for direct transmission");
-	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "sched_tx",
-	    CTLFLAG_RW, &sc->hn_sched_tx, 0,
-	    "Always schedule transmission "
-	    "instead of doing direct transmission");
-
-	if (unit == 0) {
-		struct sysctl_ctx_list *dc_ctx;
-		struct sysctl_oid_list *dc_child;
-		devclass_t dc;
-
-		/*
-		 * Add sysctl nodes for devclass
-		 */
-		dc = device_get_devclass(dev);
-		dc_ctx = devclass_get_sysctl_ctx(dc);
-		dc_child = SYSCTL_CHILDREN(devclass_get_sysctl_tree(dc));
-
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hosttcp",
-		    CTLFLAG_RD, &hn_trust_hosttcp, 0,
-		    "Trust tcp segement verification on host side, "
-		    "when csum info is missing (global setting)");
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hostudp",
-		    CTLFLAG_RD, &hn_trust_hostudp, 0,
-		    "Trust udp datagram verification on host side, "
-		    "when csum info is missing (global setting)");
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hostip",
-		    CTLFLAG_RD, &hn_trust_hostip, 0,
-		    "Trust ip packet verification on host side, "
-		    "when csum info is missing (global setting)");
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tx_chimney_size",
-		    CTLFLAG_RD, &hn_tx_chimney_size, 0,
-		    "Chimney send packet size limit");
-#if __FreeBSD_version >= 1100045
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tso_maxlen",
-		    CTLFLAG_RD, &hn_tso_maxlen, 0, "TSO burst limit");
-#endif
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "direct_tx_size",
-		    CTLFLAG_RD, &hn_direct_tx_size, 0,
-		    "Size of the packet for direct transmission");
-#if defined(INET) || defined(INET6)
-#if __FreeBSD_version >= 1100095
-		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "lro_entry_count",
-		    CTLFLAG_RD, &hn_lro_entry_count, 0, "LRO entry count");
-#endif
-#endif
-	}
+		hn_set_tx_chimney_size(sc, hn_tx_chimney_size);
 
 	return (0);
 failed:
-	hn_destroy_tx_ring(sc);
+	hn_destroy_tx_data(sc);
 	if (ifp != NULL)
 		if_free(ifp);
 	return (error);
@@ -622,16 +489,14 @@ netvsc_detach(device_t dev)
 
 	hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);
 
-	taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task);
-	taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task);
-	if (sc->hn_tx_taskq != hn_tx_taskq)
-		taskqueue_free(sc->hn_tx_taskq);
+	hn_stop_tx_tasks(sc);
 
 	ifmedia_removeall(&sc->hn_media);
-#if defined(INET) || defined(INET6)
-	tcp_lro_free(&sc->hn_lro);
-#endif
-	hn_destroy_tx_ring(sc);
+	hn_destroy_rx_data(sc);
+	hn_destroy_tx_data(sc);
+
+	if (sc->hn_tx_taskq != hn_tx_taskq)
+		taskqueue_free(sc->hn_tx_taskq);
 
 	return (0);
 }
@@ -646,13 +511,13 @@ netvsc_shutdown(device_t dev)
 }
 
 static __inline int
-hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd,
+hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
     struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
 {
 	struct mbuf *m = *m_head;
 	int error;
 
-	error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, txd->data_dmap,
+	error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
 	    m, segs, nsegs, BUS_DMA_NOWAIT);
 	if (error == EFBIG) {
 		struct mbuf *m_new;
@@ -662,13 +527,13 @@ hn_txdesc_dmamap_load(struct hn_softc *s
 			return ENOBUFS;
 		else
 			*m_head = m = m_new;
-		sc->hn_tx_collapsed++;
+		txr->hn_tx_collapsed++;
 
-		error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag,
+		error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
 		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
 	}
 	if (!error) {
-		bus_dmamap_sync(sc->hn_tx_data_dtag, txd->data_dmap,
+		bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
 		    BUS_DMASYNC_PREWRITE);
 		txd->flags |= HN_TXD_FLAG_DMAMAP;
 	}
@@ -676,20 +541,20 @@ hn_txdesc_dmamap_load(struct hn_softc *s
 }
 
 static __inline void
-hn_txdesc_dmamap_unload(struct hn_softc *sc, struct hn_txdesc *txd)
+hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
 {
 
 	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
-		bus_dmamap_sync(sc->hn_tx_data_dtag,
+		bus_dmamap_sync(txr->hn_tx_data_dtag,
 		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
-		bus_dmamap_unload(sc->hn_tx_data_dtag,
+		bus_dmamap_unload(txr->hn_tx_data_dtag,
 		    txd->data_dmap);
 		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
 	}
 }
 
 static __inline int
-hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd)
+hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
 {
 
 	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
@@ -699,7 +564,7 @@ hn_txdesc_put(struct hn_softc *sc, struc
 	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
 		return 0;
 
-	hn_txdesc_dmamap_unload(sc, txd);
+	hn_txdesc_dmamap_unload(txr, txd);
 	if (txd->m != NULL) {
 		m_freem(txd->m);
 		txd->m = NULL;
@@ -707,31 +572,31 @@ hn_txdesc_put(struct hn_softc *sc, struc
 
 	txd->flags |= HN_TXD_FLAG_ONLIST;
 
-	mtx_lock_spin(&sc->hn_txlist_spin);
-	KASSERT(sc->hn_txdesc_avail >= 0 &&
-	    sc->hn_txdesc_avail < sc->hn_txdesc_cnt,
-	    ("txdesc_put: invalid txd avail %d", sc->hn_txdesc_avail));
-	sc->hn_txdesc_avail++;
-	SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
-	mtx_unlock_spin(&sc->hn_txlist_spin);
+	mtx_lock_spin(&txr->hn_txlist_spin);
+	KASSERT(txr->hn_txdesc_avail >= 0 &&
+	    txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
+	    ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
+	txr->hn_txdesc_avail++;
+	SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
+	mtx_unlock_spin(&txr->hn_txlist_spin);
 
 	return 1;
 }
 
 static __inline struct hn_txdesc *
-hn_txdesc_get(struct hn_softc *sc)
+hn_txdesc_get(struct hn_tx_ring *txr)
 {
 	struct hn_txdesc *txd;
 
-	mtx_lock_spin(&sc->hn_txlist_spin);
-	txd = SLIST_FIRST(&sc->hn_txlist);
+	mtx_lock_spin(&txr->hn_txlist_spin);
+	txd = SLIST_FIRST(&txr->hn_txlist);
 	if (txd != NULL) {
-		KASSERT(sc->hn_txdesc_avail > 0,
-		    ("txdesc_get: invalid txd avail %d", sc->hn_txdesc_avail));
-		sc->hn_txdesc_avail--;
-		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);
+		KASSERT(txr->hn_txdesc_avail > 0,
+		    ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
+		txr->hn_txdesc_avail--;
+		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
 	}
-	mtx_unlock_spin(&sc->hn_txlist_spin);
+	mtx_unlock_spin(&txr->hn_txlist_spin);
 
 	if (txd != NULL) {
 		KASSERT(txd->m == NULL && txd->refs == 0 &&
@@ -763,22 +628,24 @@ netvsc_xmit_completion(void *context)
 {
 	netvsc_packet *packet = context;
 	struct hn_txdesc *txd;
-	struct hn_softc *sc;
+	struct hn_tx_ring *txr;
 
 	txd = (struct hn_txdesc *)(uintptr_t)
 	    packet->compl.send.send_completion_tid;
 
-	sc = txd->sc;
-	sc->hn_txeof = 1;
-	hn_txdesc_put(sc, txd);
+	txr = txd->txr;
+	txr->hn_txeof = 1;
+	hn_txdesc_put(txr, txd);
 }
 
 void
 netvsc_channel_rollup(struct hv_device *device_ctx)
 {
 	struct hn_softc *sc = device_get_softc(device_ctx->device);
+	struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; /* TODO: vRSS */
 #if defined(INET) || defined(INET6)
-	struct lro_ctrl *lro = &sc->hn_lro;
+	struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
+	struct lro_ctrl *lro = &rxr->hn_lro;
 	struct lro_entry *queued;
 
 	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
@@ -787,11 +654,11 @@ netvsc_channel_rollup(struct hv_device *
 	}
 #endif
 
-	if (!sc->hn_txeof)
+	if (!txr->hn_txeof)
 		return;
 
-	sc->hn_txeof = 0;
-	hn_start_txeof(sc->hn_ifp);
+	txr->hn_txeof = 0;
+	hn_start_txeof(txr);
 }
 
 /*
@@ -799,7 +666,7 @@ netvsc_channel_rollup(struct hv_device *
  * If this function fails, then both txd and m_head0 will be freed.
  */
 static int
-hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0)
+hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
 {
 	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
 	int error, nsegs, i;
@@ -902,7 +769,7 @@ hn_encap(struct hn_softc *sc, struct hn_
 #endif
 		tso_info->lso_v2_xmit.tcp_header_offset = 0;
 		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
-	} else if (m_head->m_pkthdr.csum_flags & sc->hn_csum_assist) {
+	} else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
 		rndis_tcp_ip_csum_info *csum_info;
 
 		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
@@ -929,8 +796,8 @@ hn_encap(struct hn_softc *sc, struct hn_
 	/*
 	 * Chimney send, if the packet could fit into one chimney buffer.
 	 */
-	if (packet->tot_data_buf_len < sc->hn_tx_chimney_size) {
-		netvsc_dev *net_dev = sc->net_dev;
+	if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
+		netvsc_dev *net_dev = txr->hn_sc->net_dev;
 		uint32_t send_buf_section_idx;
 
 		send_buf_section_idx =
@@ -949,12 +816,12 @@ hn_encap(struct hn_softc *sc, struct hn_
 			packet->send_buf_section_size =
 			    packet->tot_data_buf_len;
 			packet->page_buf_count = 0;
-			sc->hn_tx_chimney++;
+			txr->hn_tx_chimney++;
 			goto done;
 		}
 	}
 
-	error = hn_txdesc_dmamap_load(sc, txd, &m_head, segs, &nsegs);
+	error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
 	if (error) {
 		int freed;
 
@@ -964,12 +831,12 @@ hn_encap(struct hn_softc *sc, struct hn_
 		m_freem(m_head);
 		*m_head0 = NULL;
 
-		freed = hn_txdesc_put(sc, txd);
+		freed = hn_txdesc_put(txr, txd);
 		KASSERT(freed != 0,
 		    ("fail to free txd upon txdma error"));
 
-		sc->hn_txdma_failed++;
-		if_inc_counter(sc->hn_ifp, IFCOUNTER_OERRORS, 1);
+		txr->hn_txdma_failed++;
+		if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
 		return error;
 	}
 	*m_head0 = m_head;
@@ -1012,11 +879,15 @@ done:
  * Start a transmit of one or more packets
  */
 static int
-hn_start_locked(struct ifnet *ifp, int len)
+hn_start_locked(struct hn_tx_ring *txr, int len)
 {
-	struct hn_softc *sc = ifp->if_softc;
+	struct hn_softc *sc = txr->hn_sc;
+	struct ifnet *ifp = sc->hn_ifp;
 	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
 
+	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
+	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
+
 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 	    IFF_DRV_RUNNING)
 		return 0;
@@ -1040,15 +911,15 @@ hn_start_locked(struct ifnet *ifp, int l
 			return 1;
 		}
 
-		txd = hn_txdesc_get(sc);
+		txd = hn_txdesc_get(txr);
 		if (txd == NULL) {
-			sc->hn_no_txdescs++;
+			txr->hn_no_txdescs++;
 			IF_PREPEND(&ifp->if_snd, m_head);
 			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
 			break;
 		}
 
-		error = hn_encap(sc, txd, &m_head);
+		error = hn_encap(txr, txd, &m_head);
 		if (error) {
 			/* Both txd and m_head are freed */
 			continue;
@@ -1063,7 +934,7 @@ again:
 			ETHER_BPF_MTAP(ifp, m_head);
 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 		}
-		hn_txdesc_put(sc, txd);
+		hn_txdesc_put(txr, txd);
 
 		if (__predict_false(error)) {
 			int freed;
@@ -1075,9 +946,9 @@ again:
 			 * commands to run?  Ask netvsc_channel_rollup()
 			 * to kick start later.
 			 */
-			sc->hn_txeof = 1;
+			txr->hn_txeof = 1;
 			if (!send_failed) {
-				sc->hn_send_failed++;
+				txr->hn_send_failed++;
 				send_failed = 1;
 				/*
 				 * Try sending again after set hn_txeof;
@@ -1094,11 +965,11 @@ again:
 			 * DMA map in hn_txdesc_put(), if it was loaded.
 			 */
 			txd->m = NULL;
-			freed = hn_txdesc_put(sc, txd);
+			freed = hn_txdesc_put(txr, txd);
 			KASSERT(freed != 0,
 			    ("fail to free txd upon send error"));
 
-			sc->hn_send_failed++;
+			txr->hn_send_failed++;
 			IF_PREPEND(&ifp->if_snd, m_head);
 			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
 			break;
@@ -1194,10 +1065,10 @@ int
 netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet,
     rndis_tcp_ip_csum_info *csum_info)
 {
-	hn_softc_t *sc = (hn_softc_t *)device_get_softc(device_ctx->device);
+	struct hn_softc *sc = device_get_softc(device_ctx->device);
+	struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
 	struct mbuf *m_new;
 	struct ifnet *ifp;
-	device_t dev = device_ctx->device;
 	int size, do_lro = 0, do_csum = 1;
 
 	if (sc == NULL) {
@@ -1224,7 +1095,7 @@ netvsc_recv(struct hv_device *device_ctx
 		memcpy(mtod(m_new, void *), packet->data,
 		    packet->tot_data_buf_len);
 		m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
-		sc->hn_small_pkts++;
+		rxr->hn_small_pkts++;
 	} else {
 		/*
 		 * Get an mbuf with a cluster.  For packets 2K or less,
@@ -1240,7 +1111,7 @@ netvsc_recv(struct hv_device *device_ctx
 
 		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
 		if (m_new == NULL) {
-			device_printf(dev, "alloc mbuf failed.\n");
+			if_printf(ifp, "alloc mbuf failed.\n");
 			return (0);
 		}
 
@@ -1257,7 +1128,7 @@ netvsc_recv(struct hv_device *device_ctx
 		if (csum_info->receive.ip_csum_succeeded && do_csum) {
 			m_new->m_pkthdr.csum_flags |=
 			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
-			sc->hn_csum_ip++;
+			rxr->hn_csum_ip++;
 		}
 
 		/* TCP/UDP csum offload */
@@ -1267,9 +1138,9 @@ netvsc_recv(struct hv_device *device_ctx
 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 			m_new->m_pkthdr.csum_data = 0xffff;
 			if (csum_info->receive.tcp_csum_succeeded)
-				sc->hn_csum_tcp++;
+				rxr->hn_csum_tcp++;
 			else
-				sc->hn_csum_udp++;
+				rxr->hn_csum_udp++;
 		}
 
 		if (csum_info->receive.ip_csum_succeeded &&
@@ -1301,8 +1172,9 @@ netvsc_recv(struct hv_device *device_ctx
 			pr = hn_check_iplen(m_new, hoff);
 			if (pr == IPPROTO_TCP) {
 				if (do_csum &&
-				    (sc->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) {
-					sc->hn_csum_trusted++;
+				    (rxr->hn_trust_hcsum &
+				     HN_TRUST_HCSUM_TCP)) {
+					rxr->hn_csum_trusted++;
 					m_new->m_pkthdr.csum_flags |=
 					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
@@ -1312,16 +1184,17 @@ netvsc_recv(struct hv_device *device_ctx
 				do_lro = 1;
 			} else if (pr == IPPROTO_UDP) {
 				if (do_csum &&
-				    (sc->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) {
-					sc->hn_csum_trusted++;
+				    (rxr->hn_trust_hcsum &
+				     HN_TRUST_HCSUM_UDP)) {
+					rxr->hn_csum_trusted++;
 					m_new->m_pkthdr.csum_flags |=
 					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 					m_new->m_pkthdr.csum_data = 0xffff;
 				}
 			} else if (pr != IPPROTO_DONE && do_csum &&
-			    (sc->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
-				sc->hn_csum_trusted++;
+			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
+				rxr->hn_csum_trusted++;
 				m_new->m_pkthdr.csum_flags |=
 				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
 			}
@@ -1343,10 +1216,10 @@ skip:
 
 	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
 #if defined(INET) || defined(INET6)
-		struct lro_ctrl *lro = &sc->hn_lro;
+		struct lro_ctrl *lro = &rxr->hn_lro;
 
 		if (lro->lro_cnt) {
-			sc->hn_lro_tried++;
+			rxr->hn_lro_tried++;
 			if (tcp_lro_rx(lro, m_new, 0) == 0) {
 				/* DONE! */
 				return 0;
@@ -1427,8 +1300,16 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, 
 		 * Make sure that LRO aggregation length limit is still
 		 * valid, after the MTU change.
 		 */
-		if (sc->hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp))
-			sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_MIN(ifp);
+		NV_LOCK(sc);
+		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
+		    HN_LRO_LENLIM_MIN(ifp)) {
+			int i;
+			for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+				sc->hn_rx_ring[i].hn_lro.lro_length_lim =
+				    HN_LRO_LENLIM_MIN(ifp);
+			}
+		}
+		NV_UNLOCK(sc);
 #endif
 
 		do {
@@ -1469,8 +1350,10 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, 
 		}
 
 		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
-		if (sc->hn_tx_chimney_size > sc->hn_tx_chimney_max)
-			sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
+		if (sc->hn_tx_ring[0].hn_tx_chimney_size >
+		    sc->hn_tx_chimney_max)
+			hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
+
 		hn_ifinit_locked(sc);
 
 		NV_LOCK(sc);
@@ -1535,10 +1418,13 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, 
 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 		if (mask & IFCAP_TXCSUM) {
 			ifp->if_capenable ^= IFCAP_TXCSUM;
-			if (ifp->if_capenable & IFCAP_TXCSUM)
-				ifp->if_hwassist |= sc->hn_csum_assist;
-			else
-				ifp->if_hwassist &= ~sc->hn_csum_assist;
+			if (ifp->if_capenable & IFCAP_TXCSUM) {
+				ifp->if_hwassist |=
+				    sc->hn_tx_ring[0].hn_csum_assist;
+			} else {
+				ifp->if_hwassist &=
+				    ~sc->hn_tx_ring[0].hn_csum_assist;
+			}
 		}
 
 		if (mask & IFCAP_RXCSUM)
@@ -1621,50 +1507,54 @@ static void
 hn_start(struct ifnet *ifp)
 {
 	struct hn_softc *sc = ifp->if_softc;
+	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];
 
-	if (sc->hn_sched_tx)
+	if (txr->hn_sched_tx)
 		goto do_sched;
 
-	if (NV_TRYLOCK(sc)) {
+	if (mtx_trylock(&txr->hn_tx_lock)) {
 		int sched;
 
-		sched = hn_start_locked(ifp, sc->hn_direct_tx_size);
-		NV_UNLOCK(sc);
+		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
+		mtx_unlock(&txr->hn_tx_lock);
 		if (!sched)
 			return;
 	}
 do_sched:
-	taskqueue_enqueue_fast(sc->hn_tx_taskq, &sc->hn_start_task);
+	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_start_task);
 }
 
 static void
-hn_start_txeof(struct ifnet *ifp)
+hn_start_txeof(struct hn_tx_ring *txr)
 {
-	struct hn_softc *sc = ifp->if_softc;
+	struct hn_softc *sc = txr->hn_sc;
+	struct ifnet *ifp = sc->hn_ifp;
 
-	if (sc->hn_sched_tx)
+	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
+
+	if (txr->hn_sched_tx)
 		goto do_sched;
 
-	if (NV_TRYLOCK(sc)) {
+	if (mtx_trylock(&txr->hn_tx_lock)) {
 		int sched;
 
 		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
-		sched = hn_start_locked(ifp, sc->hn_direct_tx_size);
-		NV_UNLOCK(sc);
+		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
+		mtx_unlock(&txr->hn_tx_lock);
 		if (sched) {
-			taskqueue_enqueue_fast(sc->hn_tx_taskq,
-			    &sc->hn_start_task);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201606130730.u5D7Utc6086560>