From owner-svn-src-all@FreeBSD.ORG  Sat May  3 19:40:42 2014
Message-Id: <201405031940.s43JefMW028114@svn.freebsd.org>
From: Bryan Venteicher
Date: Sat, 3 May 2014 19:40:41 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r265286 - stable/10/sys/dev/virtio/network
X-SVN-Group: stable-10
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Author: bryanv
Date: Sat May  3 19:40:41 2014
New Revision: 265286
URL: http://svnweb.freebsd.org/changeset/base/265286

Log:
  MFC r261150, r261151, r261164, r261166, r261167, r261168, r261394, r261395:
  - Read and write the MAC address in the config space byte by byte
  - Also include the mbuf's csum_flags in an assert message
  - Remove stray space
  - Move duplicated transmit start code into a single function
  - Avoid queue unlock followed by relock when the enable interrupt race is lost
  - Check for a full virtqueue in the multiqueue transmit path
  - Do not place the sglist used for Rx/Tx on the stack
  - Use m_defrag() instead of m_collapse() to compact a long mbuf chain

Modified:
  stable/10/sys/dev/virtio/network/if_vtnet.c
  stable/10/sys/dev/virtio/network/if_vtnetvar.h

Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/virtio/network/if_vtnet.c
==============================================================================
--- stable/10/sys/dev/virtio/network/if_vtnet.c	Sat May  3 18:50:47 2014	(r265285)
+++ stable/10/sys/dev/virtio/network/if_vtnet.c	Sat May  3 19:40:41 2014	(r265286)
@@ -145,6 +145,7 @@ static int	vtnet_txq_mq_start_locked(str
 static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
 static void	vtnet_txq_tq_deferred(void *, int);
 #endif
+static void	vtnet_txq_start(struct vtnet_txq *);
 static void	vtnet_txq_tq_intr(void *, int);
 static void	vtnet_txq_eof(struct vtnet_txq *);
 static void	vtnet_tx_vq_intr(void *);
@@ -606,6 +607,20 @@ vtnet_setup_features(struct vtnet_softc 
 	} else
 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
 
+	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
+	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
+	else
+		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
+	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
+	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
+		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
+	else
+		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
+
 	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
 		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
 
@@ -661,6 +676,10 @@ vtnet_init_rxq(struct vtnet_softc *sc, i
 	rxq->vtnrx_sc = sc;
 	rxq->vtnrx_id = id;
 
+	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
+	if (rxq->vtnrx_sg == NULL)
+		return (ENOMEM);
+
 	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
@@ -682,6 +701,10 @@ vtnet_init_txq(struct vtnet_softc *sc, i
 	txq->vtntx_sc = sc;
 	txq->vtntx_id = id;
 
+	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
+	if (txq->vtntx_sg == NULL)
+		return (ENOMEM);
+
 #ifndef VTNET_LEGACY_TX
 	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
 	    M_NOWAIT, &txq->vtntx_mtx);
@@ -734,6 +757,11 @@ vtnet_destroy_rxq(struct vtnet_rxq *rxq)
 	rxq->vtnrx_sc = NULL;
 	rxq->vtnrx_id = -1;
 
+	if (rxq->vtnrx_sg != NULL) {
+		sglist_free(rxq->vtnrx_sg);
+		rxq->vtnrx_sg = NULL;
+	}
+
 	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
 		mtx_destroy(&rxq->vtnrx_mtx);
 }
@@ -745,6 +773,11 @@ vtnet_destroy_txq(struct vtnet_txq *txq)
 	txq->vtntx_sc = NULL;
 	txq->vtntx_id = -1;
 
+	if (txq->vtntx_sg != NULL) {
+		sglist_free(txq->vtntx_sg);
+		txq->vtntx_sg = NULL;
+	}
+
 #ifndef VTNET_LEGACY_TX
 	if (txq->vtntx_br != NULL) {
 		buf_ring_free(txq->vtntx_br, M_DEVBUF);
@@ -819,40 +852,27 @@ vtnet_alloc_virtqueues(struct vtnet_soft
 	struct vq_alloc_info *info;
 	struct vtnet_rxq *rxq;
 	struct vtnet_txq *txq;
-	int i, idx, flags, nvqs, rxsegs, error;
+	int i, idx, flags, nvqs, error;
 
 	dev = sc->vtnet_dev;
 	flags = 0;
 
-	/*
-	 * Indirect descriptors are not needed for the Rx virtqueue when
-	 * mergeable buffers are negotiated. The header is placed inline
-	 * with the data, not in a separate descriptor, and mbuf clusters
-	 * are always physically contiguous.
-	 */
-	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
-		rxsegs = 0;
-	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
-		rxsegs = VTNET_MAX_RX_SEGS;
-	else
-		rxsegs = VTNET_MIN_RX_SEGS;
-
 	nvqs = sc->vtnet_max_vq_pairs * 2;
 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
 		nvqs++;
 
-	info = malloc(sizeof(struct vq_alloc_info) * nvqs , M_TEMP, M_NOWAIT);
+	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
 	if (info == NULL)
 		return (ENOMEM);
 
 	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
 		rxq = &sc->vtnet_rxqs[i];
-		VQ_ALLOC_INFO_INIT(&info[idx], rxsegs,
+		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
 		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
 		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
 
 		txq = &sc->vtnet_txqs[i];
-		VQ_ALLOC_INFO_INIT(&info[idx+1], VTNET_MAX_TX_SEGS,
+		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
 		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
 		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
 	}
@@ -1359,14 +1379,14 @@ vtnet_rxq_replace_buf(struct vtnet_rxq *
 static int
 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
 {
-	struct sglist sg;
-	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
 	struct vtnet_softc *sc;
+	struct sglist *sg;
 	struct vtnet_rx_header *rxhdr;
 	uint8_t *mdata;
 	int offset, error;
 
 	sc = rxq->vtnrx_sc;
+	sg = rxq->vtnrx_sg;
 	mdata = mtod(m, uint8_t *);
 
 	VTNET_RXQ_LOCK_ASSERT(rxq);
@@ -1376,22 +1396,22 @@ vtnet_rxq_enqueue_buf(struct vtnet_rxq *
 	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
 	     sc->vtnet_rx_clsize));
 
-	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
+	sglist_reset(sg);
 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
 		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
 		rxhdr = (struct vtnet_rx_header *) mdata;
-		sglist_append(&sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
+		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
 		offset = sizeof(struct vtnet_rx_header);
 	} else
 		offset = 0;
 
-	sglist_append(&sg, mdata + offset, m->m_len - offset);
+	sglist_append(sg, mdata + offset, m->m_len - offset);
 	if (m->m_next != NULL) {
-		error = sglist_append_mbuf(&sg, m->m_next);
+		error = sglist_append_mbuf(sg, m->m_next);
 		MPASS(error == 0);
 	}
 
-	error = virtqueue_enqueue(rxq->vtnrx_vq, m, &sg, 0, sg.sg_nseg);
+	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
 
 	return (error);
 }
@@ -1818,9 +1838,9 @@ vtnet_rx_vq_intr(void *xrxq)
 		return;
 	}
 
-again:
 	VTNET_RXQ_LOCK(rxq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_RXQ_UNLOCK(rxq);
 		return;
@@ -1834,10 +1854,11 @@ again:
 		 * This is an occasional condition or race (when !more),
 		 * so retry a few times before scheduling the taskqueue.
 		 */
-		rxq->vtnrx_stats.vrxs_rescheduled++;
-		VTNET_RXQ_UNLOCK(rxq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_RXQ_UNLOCK(rxq);
+		rxq->vtnrx_stats.vrxs_rescheduled++;
 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
 	} else
 		VTNET_RXQ_UNLOCK(rxq);
@@ -2025,7 +2046,8 @@ vtnet_txq_offload(struct vtnet_txq *txq,
 	}
 
 	KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
-	    ("%s: mbuf %p TSO without checksum offload", __func__, m));
+	    ("%s: mbuf %p TSO without checksum offload %#x",
+	    __func__, m, flags));
 
 	error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
 	if (error)
@@ -2043,45 +2065,43 @@ static int
 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
     struct vtnet_tx_header *txhdr)
 {
-	struct sglist sg;
-	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
 	struct vtnet_softc *sc;
 	struct virtqueue *vq;
+	struct sglist *sg;
 	struct mbuf *m;
-	int collapsed, error;
+	int error;
 
-	vq = txq->vtntx_vq;
 	sc = txq->vtntx_sc;
+	vq = txq->vtntx_vq;
+	sg = txq->vtntx_sg;
 	m = *m_head;
-	collapsed = 0;
 
-	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
-	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
-	KASSERT(error == 0 && sg.sg_nseg == 1,
+	sglist_reset(sg);
+	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
+	KASSERT(error == 0 && sg->sg_nseg == 1,
 	    ("%s: error %d adding header to sglist", __func__, error));
 
-again:
-	error = sglist_append_mbuf(&sg, m);
+	error = sglist_append_mbuf(sg, m);
 	if (error) {
-		if (collapsed)
-			goto fail;
-
-		m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
+		m = m_defrag(m, M_NOWAIT);
 		if (m == NULL)
 			goto fail;
 
 		*m_head = m;
-		collapsed = 1;
-		txq->vtntx_stats.vtxs_collapsed++;
-		goto again;
+		sc->vtnet_stats.tx_defragged++;
+
+		error = sglist_append_mbuf(sg, m);
+		if (error)
+			goto fail;
 	}
 
 	txhdr->vth_mbuf = m;
-	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
+	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
 
 	return (error);
 
 fail:
+	sc->vtnet_stats.tx_defrag_failed++;
 	m_freem(*m_head);
 	*m_head = NULL;
 
@@ -2238,6 +2258,12 @@ vtnet_txq_mq_start_locked(struct vtnet_t
 	vtnet_txq_eof(txq);
 
 	while ((m = drbr_peek(ifp, br)) != NULL) {
+		if (virtqueue_full(vq)) {
+			drbr_putback(ifp, br, m);
+			error = ENOBUFS;
+			break;
+		}
+
 		error = vtnet_txq_encap(txq, &m);
 		if (error) {
 			if (m != NULL)
@@ -2306,6 +2332,24 @@ vtnet_txq_tq_deferred(void *xtxq, int pe
 #endif /* VTNET_LEGACY_TX */
 
 static void
+vtnet_txq_start(struct vtnet_txq *txq)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+
+	sc = txq->vtntx_sc;
+	ifp = sc->vtnet_ifp;
+
+#ifdef VTNET_LEGACY_TX
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		vtnet_start_locked(txq, ifp);
+#else
+	if (!drbr_empty(ifp, txq->vtntx_br))
+		vtnet_txq_mq_start_locked(txq, NULL);
+#endif
+}
+
+static void
 vtnet_txq_tq_intr(void *xtxq, int pending)
 {
 	struct vtnet_softc *sc;
@@ -2325,13 +2369,7 @@ vtnet_txq_tq_intr(void *xtxq, int pendin
 
 	vtnet_txq_eof(txq);
 
-#ifdef VTNET_LEGACY_TX
-	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
-		vtnet_start_locked(txq, ifp);
-#else
-	if (!drbr_empty(ifp, txq->vtntx_br))
-		vtnet_txq_mq_start_locked(txq, NULL);
-#endif
+	vtnet_txq_start(txq);
 
 	if (vtnet_txq_enable_intr(txq) != 0) {
 		vtnet_txq_disable_intr(txq);
@@ -2392,9 +2430,9 @@ vtnet_tx_vq_intr(void *xtxq)
 		return;
 	}
 
-again:
 	VTNET_TXQ_LOCK(txq);
 
+again:
 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 		VTNET_TXQ_UNLOCK(txq);
 		return;
@@ -2402,13 +2440,7 @@ again:
 
 	vtnet_txq_eof(txq);
 
-#ifdef VTNET_LEGACY_TX
-	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
-		vtnet_start_locked(txq, ifp);
-#else
-	if (!drbr_empty(ifp, txq->vtntx_br))
-		vtnet_txq_mq_start_locked(txq, NULL);
-#endif
+	vtnet_txq_start(txq);
 
 	if (vtnet_txq_enable_intr(txq) != 0) {
 		vtnet_txq_disable_intr(txq);
@@ -2416,9 +2448,10 @@ again:
 		 * This is an occasional race, so retry a few times
 		 * before scheduling the taskqueue.
 		 */
-		VTNET_TXQ_UNLOCK(txq);
 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
 			goto again;
+
+		VTNET_TXQ_UNLOCK(txq);
 		txq->vtntx_stats.vtxs_rescheduled++;
 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
 	} else
@@ -2428,24 +2461,16 @@ again:
 static void
 vtnet_tx_start_all(struct vtnet_softc *sc)
 {
-	struct ifnet *ifp;
 	struct vtnet_txq *txq;
 	int i;
 
-	ifp = sc->vtnet_ifp;
 	VTNET_CORE_LOCK_ASSERT(sc);
 
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		txq = &sc->vtnet_txqs[i];
 
 		VTNET_TXQ_LOCK(txq);
-#ifdef VTNET_LEGACY_TX
-		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
-			vtnet_start_locked(txq, ifp);
-#else
-		if (!drbr_empty(ifp, txq->vtntx_br))
-			vtnet_txq_mq_start_locked(txq, NULL);
-#endif
+		vtnet_txq_start(txq);
 		VTNET_TXQ_UNLOCK(txq);
 	}
 }
@@ -2521,7 +2546,6 @@ vtnet_txq_accum_stats(struct vtnet_txq *
 	accum->vtxs_obytes += st->vtxs_obytes;
 	accum->vtxs_csum += st->vtxs_csum;
 	accum->vtxs_tso += st->vtxs_tso;
-	accum->vtxs_collapsed += st->vtxs_collapsed;
 	accum->vtxs_rescheduled += st->vtxs_rescheduled;
 }
 
@@ -2839,9 +2863,10 @@ vtnet_init_rx_queues(struct vtnet_softc 
 	sc->vtnet_rx_clsize = clsize;
 	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
 
-	/* The first segment is reserved for the header. */
-	KASSERT(sc->vtnet_rx_nmbufs < VTNET_MAX_RX_SEGS,
-	    ("%s: too many rx mbufs %d", __func__, sc->vtnet_rx_nmbufs));
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
+	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
+	    ("%s: too many rx mbufs %d for %d segments", __func__,
+	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
 
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		rxq = &sc->vtnet_rxqs[i];
@@ -3483,6 +3508,7 @@ static void
 vtnet_set_hwaddr(struct vtnet_softc *sc)
 {
 	device_t dev;
+	int i;
 
 	dev = sc->vtnet_dev;
 
@@ -3490,9 +3516,11 @@ vtnet_set_hwaddr(struct vtnet_softc *sc)
 		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
 			device_printf(dev, "unable to set MAC address\n");
 	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
-		virtio_write_device_config(dev,
-		    offsetof(struct virtio_net_config, mac),
-		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+		for (i = 0; i < ETHER_ADDR_LEN; i++) {
+			virtio_write_dev_config_1(dev,
+			    offsetof(struct virtio_net_config, mac) + i,
+			    sc->vtnet_hwaddr[i]);
+		}
 	}
 }
 
@@ -3500,6 +3528,7 @@ static void
 vtnet_get_hwaddr(struct vtnet_softc *sc)
 {
 	device_t dev;
+	int i;
 
 	dev = sc->vtnet_dev;
 
@@ -3517,8 +3546,10 @@ vtnet_get_hwaddr(struct vtnet_softc *sc)
 		return;
 	}
 
-	virtio_read_device_config(dev, offsetof(struct virtio_net_config, mac),
-	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+	for (i = 0; i < ETHER_ADDR_LEN; i++) {
+		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
+		    offsetof(struct virtio_net_config, mac) + i);
+	}
 }
 
 static void
@@ -3595,8 +3626,6 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx
 	    &stats->vtxs_csum, "Transmit checksum offloaded");
 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
 	    &stats->vtxs_tso, "Transmit segmentation offloaded");
-	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "collapsed", CTLFLAG_RD,
-	    &stats->vtxs_collapsed, "Transmit mbufs collapsed");
 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
 	    &stats->vtxs_rescheduled,
 	    "Transmit interrupt handler rescheduled");
@@ -3676,6 +3705,12 @@ vtnet_setup_stat_sysctl(struct sysctl_ct
 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
 	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
 	    "Aborted transmit of TSO buffer with non TCP protocol");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
+	    CTLFLAG_RD, &stats->tx_defragged,
+	    "Transmit mbufs defragged");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
+	    CTLFLAG_RD, &stats->tx_defrag_failed,
+	    "Aborted transmit of buffer because defrag failed");
 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
 	    CTLFLAG_RD, &stats->tx_csum_offloaded,
 	    "Offloaded checksum of transmitted buffer");

Modified: stable/10/sys/dev/virtio/network/if_vtnetvar.h
==============================================================================
--- stable/10/sys/dev/virtio/network/if_vtnetvar.h	Sat May  3 18:50:47 2014	(r265285)
+++ stable/10/sys/dev/virtio/network/if_vtnetvar.h	Sat May  3 19:40:41 2014	(r265286)
@@ -44,6 +44,8 @@ struct vtnet_statistics {
 	uint64_t	tx_csum_bad_ethtype;
 	uint64_t	tx_tso_bad_ethtype;
 	uint64_t	tx_tso_not_tcp;
+	uint64_t	tx_defragged;
+	uint64_t	tx_defrag_failed;
 
 	/*
 	 * These are accumulated from each Rx/Tx queue.
@@ -70,6 +72,7 @@ struct vtnet_rxq {
 	struct mtx		 vtnrx_mtx;
 	struct vtnet_softc	*vtnrx_sc;
 	struct virtqueue	*vtnrx_vq;
+	struct sglist		*vtnrx_sg;
 	int			 vtnrx_id;
 	int			 vtnrx_process_limit;
 	struct vtnet_rxq_stats	 vtnrx_stats;
@@ -91,7 +94,6 @@ struct vtnet_txq_stats {
 	uint64_t vtxs_omcasts;	/* if_omcasts */
 	uint64_t vtxs_csum;
 	uint64_t vtxs_tso;
-	uint64_t vtxs_collapsed;
 	uint64_t vtxs_rescheduled;
 };
 
@@ -99,6 +101,7 @@ struct vtnet_txq {
 	struct mtx		 vtntx_mtx;
 	struct vtnet_softc	*vtntx_sc;
 	struct virtqueue	*vtntx_vq;
+	struct sglist		*vtntx_sg;
 #ifndef VTNET_LEGACY_TX
 	struct buf_ring		*vtntx_br;
 #endif
@@ -143,9 +146,11 @@ struct vtnet_softc {
 	int			 vtnet_link_active;
 	int			 vtnet_hdr_size;
 	int			 vtnet_rx_process_limit;
+	int			 vtnet_rx_nsegs;
 	int			 vtnet_rx_nmbufs;
 	int			 vtnet_rx_clsize;
 	int			 vtnet_rx_new_clsize;
+	int			 vtnet_tx_nsegs;
 	int			 vtnet_if_flags;
 	int			 vtnet_act_vq_pairs;
 	int			 vtnet_max_vq_pairs;
@@ -293,11 +298,14 @@ CTASSERT(sizeof(struct vtnet_mac_filter)
 
 /*
  * Used to preallocate the Vq indirect descriptors. The first segment
- * is reserved for the header.
+ * is reserved for the header, except for mergeable buffers since the
+ * header is placed inline with the data.
  */
+#define VTNET_MRG_RX_SEGS	1
 #define VTNET_MIN_RX_SEGS	2
 #define VTNET_MAX_RX_SEGS	34
-#define VTNET_MAX_TX_SEGS	34
+#define VTNET_MIN_TX_SEGS	4
+#define VTNET_MAX_TX_SEGS	64
 
 /*
  * Assert we can receive and transmit the maximum with regular
@@ -314,7 +322,7 @@ CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLB
 
 /*
  * Determine how many mbufs are in each receive buffer. For LRO without
- * mergeable descriptors, we must allocate an mbuf chain large enough to
+ * mergeable buffers, we must allocate an mbuf chain large enough to
  * hold both the vtnet_rx_header and the maximum receivable data.
  */
 #define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)	\
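
A note on the m_collapse() -> m_defrag() change above: when a transmit
mbuf chain maps to more segments than the preallocated sglist allows, the
driver now rebuilds the whole chain with m_defrag(), which copies it into
the fewest possible clusters, instead of m_collapse(), whose cheaper local
compaction can still leave the chain over the segment limit. The sketch
below condenses that retry shape from vtnet_txq_enqueue_buf(); the helper
name is hypothetical and it only compiles inside a FreeBSD kernel build,
but the mbuf and sglist calls are the real KPIs used in the diff.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/sglist.h>

/* Hypothetical helper: one defrag attempt, then give up. */
static int
map_chain_or_defrag(struct sglist *sg, struct mbuf **m_head)
{
	struct mbuf *m;
	int error;

	m = *m_head;
	error = sglist_append_mbuf(sg, m);	/* EFBIG: too many segments */
	if (error) {
		/*
		 * sglist_append_mbuf() restores the sglist to its prior
		 * state on failure, so retrying after a defrag is safe.
		 */
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);	/* caller frees *m_head */
		*m_head = m;
		error = sglist_append_mbuf(sg, m);
	}
	return (error);
}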
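The new virtqueue_full() check in vtnet_txq_mq_start_locked() leans on the
buf_ring peek/putback/advance contract: drbr_peek() inspects the head
packet without consuming it, drbr_putback() returns it to the head when
the virtqueue has no room (so nothing is dropped), and drbr_advance()
consumes it only once it has been handed to the virtqueue. A condensed
sketch of the loop (a fragment, not buildable outside a kernel; stats and
watchdog bookkeeping omitted):

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			/* Leave the packet at the head for the next pass. */
			drbr_putback(ifp, br, m);
			error = ENOBUFS;
			break;
		}

		error = vtnet_txq_encap(txq, &m);
		if (error) {
			if (m != NULL)
				drbr_putback(ifp, br, m); /* retry later */
			else
				drbr_advance(ifp, br);	  /* mbuf was freed */
			break;
		}

		/* Committed to the virtqueue; consume it from the ring. */
		drbr_advance(ifp, br);
		ETHER_BPF_MTAP(ifp, m);
	}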
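Finally, the moved "again:" labels are what eliminate the unlock/relock
cycle mentioned in the log: the label now sits after the queue lock is
taken, so when vtnet_txq_enable_intr() reports that more work arrived
while interrupts were being re-enabled, the handler retries under the
mutex it already holds and unlocks only once, when the retry budget is
spent and the remaining work is punted to the taskqueue. A condensed
fragment of the new control flow (running-state check and early returns
trimmed):

	VTNET_TXQ_LOCK(txq);
again:
	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	if (vtnet_txq_enable_intr(txq) != 0) {
		vtnet_txq_disable_intr(txq);
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
			goto again;		/* mutex still held */

		VTNET_TXQ_UNLOCK(txq);
		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	} else
		VTNET_TXQ_UNLOCK(txq);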