Date:      Fri, 14 Dec 2012 08:07:37 +0000 (UTC)
From:      Bryan Venteicher <bryanv@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r244203 - in projects/vmxnet/sys: dev/vmware dev/vmware/vmxnet modules/vmware modules/vmware/vmxnet
Message-ID:  <201212140807.qBE87bN6019824@svn.freebsd.org>

Author: bryanv
Date: Fri Dec 14 08:07:37 2012
New Revision: 244203
URL: http://svnweb.freebsd.org/changeset/base/244203

Log:
  Initial import of the OpenBSD if_vic ethernet driver
  
  This started off as a quick and dirty port done on a boring
  flight, but has basically turned into a complete rewrite of
  much of the original code, making all bugs my own.
  
  There are still a couple of XXX comments to be addressed, and
  more research on the ESXi and Fusion black boxes needs to be
  completed.
  
  The rest of the work needed to hook this to the build will
  come in a later commit.
  
  Approved by:   grehan (mentor)

Added:
  projects/vmxnet/sys/dev/vmware/
  projects/vmxnet/sys/dev/vmware/vmxnet/
  projects/vmxnet/sys/dev/vmware/vmxnet/if_vic.c   (contents, props changed)
  projects/vmxnet/sys/dev/vmware/vmxnet/if_vicreg.h   (contents, props changed)
  projects/vmxnet/sys/dev/vmware/vmxnet/if_vicvar.h   (contents, props changed)
  projects/vmxnet/sys/modules/vmware/
  projects/vmxnet/sys/modules/vmware/Makefile   (contents, props changed)
  projects/vmxnet/sys/modules/vmware/vmxnet/
  projects/vmxnet/sys/modules/vmware/vmxnet/Makefile   (contents, props changed)
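
The two Makefiles above hook the driver into the module build; their
contents fall beyond the point where the diff below is truncated. As a
rough sketch, a kernel module Makefile for this layout would typically
look like the following (hypothetical, assuming the stock kmod
framework; the committed files may differ):

    .PATH: ${.CURDIR}/../../../dev/vmware/vmxnet

    KMOD=   if_vic
    SRCS=   if_vic.c
    SRCS+=  bus_if.h device_if.h pci_if.h

    .include <bsd.kmod.mk>

with sys/modules/vmware/Makefile simply descending into the vmxnet
subdirectory via SUBDIR.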

Added: projects/vmxnet/sys/dev/vmware/vmxnet/if_vic.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/vmxnet/sys/dev/vmware/vmxnet/if_vic.c	Fri Dec 14 08:07:37 2012	(r244203)
@@ -0,0 +1,2024 @@
+/*-
+ * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
+ * Copyright (c) 2012 Bryan Venteicher <bryanv@freebsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $OpenBSD: if_vic.c,v 1.77 2011/11/29 11:53:25 jsing Exp $
+ */
+
+/* Driver for VMware Virtual NIC ("vmxnet") devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "if_vicreg.h"
+#include "if_vicvar.h"
+
+/*
+ * The behavior of the second Rx queue is a bit uncertain. It appears
+ * capable of holding fragments from the first Rx queue, and it also
+ * appears capable of containing entire packets. IMO the former behavior
+ * is more desirable, but I cannot determine how to enable it, so default
+ * to the latter. The two behaviors cannot be used simultaneously.
+ */
+#define VIC_NOFRAG_RXQUEUE
+
+static int	vic_probe(device_t);
+static int	vic_attach(device_t);
+static int	vic_detach(device_t);
+static int	vic_shutdown(device_t);
+
+static int	vic_query(struct vic_softc *);
+static uint32_t	vic_read(struct vic_softc *, bus_size_t);
+static void	vic_write(struct vic_softc *, bus_size_t, uint32_t);
+static uint32_t	vic_read_cmd(struct vic_softc *, uint32_t);
+
+static int	vic_alloc_ring_bufs(struct vic_softc *);
+static void	vic_init_shared_mem(struct vic_softc *);
+static int	vic_alloc_data(struct vic_softc *);
+static void	vic_free_data(struct vic_softc *);
+static void	vic_dmamap_cb(void *, bus_dma_segment_t *, int, int);
+static int	vic_alloc_dma(struct vic_softc *);
+static void	vic_free_dma(struct vic_softc *);
+
+static int	vic_init_rings(struct vic_softc *sc);
+static void	vic_init_locked(struct vic_softc *);
+static void	vic_init(void *);
+
+static int 	vic_encap_load_mbuf(struct vic_softc *, struct mbuf **, int,
+		    bus_dmamap_t, bus_dma_segment_t [], int *);
+static void 	vic_assign_sge(struct vic_sg *, bus_dma_segment_t *);
+static int	vic_encap(struct vic_softc *, struct mbuf **);
+static void	vic_start_locked(struct ifnet *);
+static void	vic_start(struct ifnet *);
+static void	vic_watchdog(struct vic_softc *);
+
+static void	vic_free_rx_rings(struct vic_softc *);
+static void	vic_free_tx_ring(struct vic_softc *);
+static void	vic_tx_quiesce_wait(struct vic_softc *);
+static void	vic_stop(struct vic_softc *);
+
+static int	vic_newbuf(struct vic_softc *, struct vic_rxqueue *, int);
+static void	vic_rxeof_discard(struct vic_softc *, struct vic_rxqueue *,
+		    int);
+static void	vic_rxeof_discard_frags(struct vic_softc *);
+static int 	vic_rxeof_frag(struct vic_softc *, struct mbuf *);
+static void	vic_rxeof(struct vic_softc *, int);
+static void	vic_txeof(struct vic_softc *);
+static void	vic_intr(void *);
+
+static void	vic_set_ring_sizes(struct vic_softc *);
+static void	vic_link_state(struct vic_softc *);
+static void	vic_set_rxfilter(struct vic_softc *);
+static void	vic_get_lladdr(struct vic_softc *);
+static void	vic_set_lladdr(struct vic_softc *);
+static int	vic_media_change(struct ifnet *);
+static void	vic_media_status(struct ifnet *, struct ifmediareq *);
+static int	vic_ioctl(struct ifnet *, u_long, caddr_t);
+static void	vic_tick(void *);
+
+static int	vic_pcnet_masquerade(device_t);
+static void	vic_pcnet_restore(struct vic_softc *);
+static int	vic_pcnet_transform(struct vic_softc *);
+
+static void	vic_sysctl_node(struct vic_softc *);
+
+static void	vic_barrier(struct vic_softc *, int);
+
+#define VIC_VMWARE_VENDORID	0x15AD
+#define VIC_VMWARE_DEVICEID	0x0720
+#define VIC_PCNET_VENDORID	0x1022 /* PCN_VENDORID */
+#define VIC_PCNET_DEVICEID	0x2000 /* PCN_DEVICEID_PCNET */
+
+static device_method_t vic_methods[] = {
+	/* Device interface. */
+	DEVMETHOD(device_probe,		vic_probe),
+	DEVMETHOD(device_attach,	vic_attach),
+	DEVMETHOD(device_detach,	vic_detach),
+	DEVMETHOD(device_shutdown,	vic_shutdown),
+
+	DEVMETHOD_END
+};
+
+static driver_t vic_driver = {
+	"vic", vic_methods, sizeof(struct vic_softc)
+};
+
+static devclass_t vic_devclass;
+DRIVER_MODULE(vic, pci, vic_driver, vic_devclass, 0, 0);
+
+MODULE_DEPEND(vic, pci, 1, 1, 1);
+MODULE_DEPEND(vic, ether, 1, 1, 1);
+
+static int
+vic_probe(device_t dev)
+{
+	uint16_t vendorid, deviceid;
+
+	vendorid = pci_get_vendor(dev);
+	deviceid = pci_get_device(dev);
+
+	if (vendorid == VIC_VMWARE_VENDORID &&
+	    deviceid == VIC_VMWARE_DEVICEID) {
+		device_set_desc(dev, "VMware Ethernet Adapter");
+		return (BUS_PROBE_DEFAULT);
+	}
+
+	if (vendorid == VIC_PCNET_VENDORID &&
+	    deviceid == VIC_PCNET_DEVICEID) {
+		/*
+		 * The hypervisor can present us with a PCNet device
+		 * that we can transform to a vmxnet interface.
+		 */
+		if (vic_pcnet_masquerade(dev) == 0) {
+			device_set_desc(dev,
+			    "VMware (Flexible) Ethernet Adapter");
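+			/*
+			 * Return a priority above BUS_PROBE_DEFAULT so
+			 * this driver claims the device ahead of the
+			 * native PCNet drivers.
+			 */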
+			return (BUS_PROBE_VENDOR);
+		}
+	}
+
+	return (ENXIO);
+}
+
+static int
+vic_attach(device_t dev)
+{
+	struct vic_softc *sc;
+	struct ifnet *ifp;
+	int rid, error;
+
+	sc = device_get_softc(dev);
+	sc->vic_dev = dev;
+
+	VIC_LOCK_INIT(sc, device_get_nameunit(dev));
+	VIC_RX_LOCK_INIT(sc, device_get_nameunit(dev));
+	VIC_TX_LOCK_INIT(sc, device_get_nameunit(dev));
+	callout_init_mtx(&sc->vic_tick, &sc->vic_mtx, 0);
+
+	rid = PCIR_BAR(VIC_PCI_BAR);
+	sc->vic_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+	    RF_ACTIVE);
+	if (sc->vic_res == NULL) {
+		device_printf(dev, "could not allocate I/O port resource\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	sc->vic_iot = rman_get_bustag(sc->vic_res);
+	sc->vic_ioh = rman_get_bushandle(sc->vic_res);
+
+	rid = 0;
+	sc->vic_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+	    RF_SHAREABLE | RF_ACTIVE);
+	if (sc->vic_irq == NULL) {
+		device_printf(dev, "could not allocate interrupt\n");
+		error = ENXIO;
+		goto fail;
+	}
+
+	if (pci_get_vendor(dev) == VIC_PCNET_VENDORID &&
+	    pci_get_device(dev) == VIC_PCNET_DEVICEID) {
+		/* Turn this 'flexible' adapter into a vmxnet device. */
+		error = vic_pcnet_transform(sc);
+		if (error)
+			goto fail;
+	}
+
+	if (vic_query(sc) != 0) {
+		error = ENXIO;
+		goto fail;
+	}
+
+	if (vic_alloc_data(sc) != 0) {
+		error = ENXIO;
+		goto fail;
+	}
+
+	ifp = sc->vic_ifp = if_alloc(IFT_ETHER);
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	if_initbaudrate(ifp, IF_Gbps(1));
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_init = vic_init;
+	ifp->if_ioctl = vic_ioctl;
+	ifp->if_start = vic_start;
+	ifp->if_snd.ifq_drv_maxlen = sc->vic_tx_nbufs - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vic_tx_nbufs - 1);
+	IFQ_SET_READY(&ifp->if_snd);
+
+	ether_ifattach(ifp, sc->vic_lladdr);
+
+	/* Tell the upper layer(s) we support long frames. */
+	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+	ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+	if (sc->vic_cap & VIC_CMD_HWCAP_VLAN)
+		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+	if (sc->vic_cap & VIC_CMD_HWCAP_CSUM) {
+		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
+		ifp->if_hwassist |= VIC_CSUM_FEATURES;
+	}
+
+	ifp->if_capenable = ifp->if_capabilities;
+
+	if (sc->vic_flags & VIC_FLAGS_TSO) {
+		ifp->if_hwassist |= CSUM_TSO;
+		if (sc->vic_cap & VIC_CMD_HWCAP_TSO)
+			ifp->if_capabilities |= IFCAP_TSO4;
+	}
+	if (sc->vic_flags & VIC_FLAGS_LRO)
+		ifp->if_capabilities |= IFCAP_LRO;
+
+	ifmedia_init(&sc->vic_media, 0, vic_media_change, vic_media_status);
+	ifmedia_add(&sc->vic_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->vic_media, IFM_ETHER | IFM_AUTO);
+
+	error = bus_setup_intr(dev, sc->vic_irq,
+	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vic_intr, sc, &sc->vic_intrhand);
+	if (error) {
+		ether_ifdetach(ifp);
+		device_printf(dev, "could not set up interrupt\n");
+		goto fail;
+	}
+
+	if (bootverbose) {
+		device_printf(dev,
+		    "feature 0x%b cap 0x%b rxbuf %d/%d txbuf %d\n",
+		    sc->vic_feature, VIC_CMD_FEATURE_BITS, sc->vic_cap,
+		    VIC_CMD_HWCAP_BITS, sc->vic_rxq[0].nbufs,
+		    sc->vic_rxq[1].nbufs, sc->vic_tx_nbufs);
+	}
+
+	vic_sysctl_node(sc);
+
+fail:
+	if (error)
+		vic_detach(dev);
+
+	return (error);
+}
+
+static int
+vic_detach(device_t dev)
+{
+	struct vic_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+	ifp = sc->vic_ifp;
+
+	if (device_is_attached(dev)) {
+		ether_ifdetach(ifp);
+		VIC_LOCK(sc);
+		vic_stop(sc);
+		VIC_UNLOCK(sc);
+		callout_drain(&sc->vic_tick);
+	}
+
+	if (sc->vic_intrhand != NULL) {
+		bus_teardown_intr(dev, sc->vic_irq, sc->vic_intrhand);
+		sc->vic_intrhand = NULL;
+	}
+
+	if (ifp != NULL) {
+		if_free(ifp);
+		sc->vic_ifp = NULL;
+	}
+
+	vic_free_data(sc);
+
+	if (sc->vic_irq != NULL) {
+		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vic_irq);
+		sc->vic_irq = NULL;
+	}
+
+	if (sc->vic_res != NULL) {
+		if (sc->vic_flags & VIC_FLAGS_MORPHED_PCNET)
+			vic_pcnet_restore(sc);
+
+		bus_release_resource(dev, SYS_RES_IOPORT,
+		    PCIR_BAR(VIC_PCI_BAR), sc->vic_res);
+		sc->vic_res = NULL;
+	}
+
+	VIC_TX_LOCK_DESTROY(sc);
+	VIC_RX_LOCK_DESTROY(sc);
+	VIC_LOCK_DESTROY(sc);
+
+	return (0);
+}
+
+static int
+vic_shutdown(device_t dev)
+{
+
+	return (0);
+}
+
+static int
+vic_query(struct vic_softc *sc)
+{
+	device_t dev;
+	uint32_t major, minor;
+
+	dev = sc->vic_dev;
+	major = vic_read(sc, VIC_VERSION_MAJOR);
+	minor = vic_read(sc, VIC_VERSION_MINOR);
+
+	/* Check for a supported version. */
+	if ((major & VIC_VERSION_MAJOR_M) !=
+	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
+		device_printf(dev, "magic mismatch\n");
+		return (1);
+	}
+
+	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
+		device_printf(dev, "unsupported version (%#X)\n",
+		    major & ~VIC_VERSION_MAJOR_M);
+		return (1);
+	}
+
+	sc->vic_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);
+	sc->vic_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
+
+	vic_get_lladdr(sc);
+
+	if (sc->vic_feature & VIC_CMD_FEATURE_JUMBO)
+		sc->vic_flags |= VIC_FLAGS_JUMBO;
+
+	if (sc->vic_cap & VIC_CMD_HWCAP_LPD &&
+	    sc->vic_cap & VIC_CMD_HWCAP_RX_CHAIN &&
+	    sc->vic_feature & VIC_CMD_FEATURE_LPD)
+		sc->vic_flags |= VIC_FLAGS_LRO;
+
+	if (sc->vic_cap & VIC_CMD_HWCAP_SG)
+		sc->vic_sg_max = VIC_SG_MAX;
+	else
+		sc->vic_sg_max = 1;
+
+	if (sc->vic_cap & VIC_CMD_HWCAP_SG &&
+	    sc->vic_cap & VIC_CMD_HWCAP_TSO &&
+	    sc->vic_cap & VIC_CMD_HWCAP_TX_CHAIN &&
+	    sc->vic_feature & VIC_CMD_FEATURE_TSO)
+		sc->vic_flags |= VIC_FLAGS_TSO;
+
+	if (sc->vic_flags & VIC_VMXNET2_FLAGS)
+		sc->vic_flags |= VIC_FLAGS_ENHANCED;
+
+	vic_set_ring_sizes(sc);
+
+	return (0);
+}
+
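+/*
+ * Register accessors. The bus space barriers keep device accesses in
+ * program order; vic_ioadj is the offset of the vmxnet registers
+ * within the I/O window, which may be nonzero for a transformed
+ * PCNet device.
+ */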
+static uint32_t
+vic_read(struct vic_softc *sc, bus_size_t r)
+{
+
+	r += sc->vic_ioadj;
+
+	bus_space_barrier(sc->vic_iot, sc->vic_ioh, r, 4,
+	    BUS_SPACE_BARRIER_READ);
+	return (bus_space_read_4(sc->vic_iot, sc->vic_ioh, r));
+}
+
+static void
+vic_write(struct vic_softc *sc, bus_size_t r, uint32_t v)
+{
+
+	r += sc->vic_ioadj;
+
+	bus_space_write_4(sc->vic_iot, sc->vic_ioh, r, v);
+	bus_space_barrier(sc->vic_iot, sc->vic_ioh, r, 4,
+	    BUS_SPACE_BARRIER_WRITE);
+}
+
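+/*
+ * Execute a command: write it to the command register, then read the
+ * register back to fetch the result.
+ */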
+static uint32_t
+vic_read_cmd(struct vic_softc *sc, uint32_t cmd)
+{
+
+	vic_write(sc, VIC_CMD, cmd);
+	return (vic_read(sc, VIC_CMD));
+}
+
+static int
+vic_alloc_ring_bufs(struct vic_softc *sc)
+{
+	device_t dev;
+	struct vic_rxqueue *rxq;
+	int q;
+
+	dev = sc->vic_dev;
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		rxq = &sc->vic_rxq[q];
+
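+		/*
+		 * The first ring uses regular clusters; the second ring
+		 * uses page-sized buffers that hold either fragments
+		 * from the first ring or whole packets (see the
+		 * VIC_NOFRAG_RXQUEUE comment above).
+		 */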
+		if (q == 0)
+			rxq->pktlen = MCLBYTES;
+		else
+			rxq->pktlen = MJUMPAGESIZE;
+
+		rxq->bufs = malloc(sizeof(struct vic_rxbuf) * rxq->nbufs,
+		    M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (rxq->bufs == NULL) {
+			device_printf(dev,
+			    "unable to allocate rxbuf for ring %d\n", q);
+			return (ENOMEM);
+		}
+	}
+
+	sc->vic_txbuf = malloc(sizeof(struct vic_txbuf) * sc->vic_tx_nbufs,
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (sc->vic_txbuf == NULL) {
+		device_printf(dev, "unable to allocate txbuf\n");
+		return (ENOMEM);
+	}
+
+	return (0);
+}
+
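+/*
+ * Lay out the memory shared with the host: a vic_data header, followed
+ * by the Rx descriptor rings, followed by the Tx descriptor ring. The
+ * offset of each ring is recorded in the header so the host can locate
+ * it within the single allocation.
+ */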
+static void
+vic_init_shared_mem(struct vic_softc *sc)
+{
+	uint8_t *kva;
+	u_int offset;
+	int q;
+
+	kva = sc->vic_dma_kva;
+	sc->vic_data = (struct vic_data *) kva;
+
+	sc->vic_data->vd_magic = VIC_MAGIC;
+	sc->vic_data->vd_length = sc->vic_dma_size;
+
+	offset = sizeof(struct vic_data);
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		sc->vic_rxq[q].slots = (struct vic_rxdesc *) &kva[offset];
+		sc->vic_data->vd_rx_offset[q] = offset;
+		sc->vic_data->vd_rx[q].length = sc->vic_rxq[q].nbufs;
+
+		offset += sc->vic_rxq[q].nbufs * sizeof(struct vic_rxdesc);
+	}
+
+	sc->vic_txq = (struct vic_txdesc *) &kva[offset];
+	sc->vic_data->vd_tx_offset = offset;
+	sc->vic_data->vd_tx_length = sc->vic_tx_nbufs;
+
+	if (sc->vic_flags & VIC_FLAGS_TSO)
+		sc->vic_data->vd_tx_maxfrags = VIC_TSO_MAXSEGS;
+	else
+		sc->vic_data->vd_tx_maxfrags = sc->vic_sg_max;
+}
+
+static int
+vic_alloc_data(struct vic_softc *sc)
+{
+	int error;
+
+	error = vic_alloc_ring_bufs(sc);
+	if (error)
+		return (error);
+
+	error = vic_alloc_dma(sc);
+	if (error)
+		return (error);
+
+	vic_init_shared_mem(sc);
+
+	return (0);
+}
+
+static void
+vic_free_data(struct vic_softc *sc)
+{
+	int q;
+
+	vic_free_dma(sc);
+
+	if (sc->vic_txbuf != NULL) {
+		free(sc->vic_txbuf, M_DEVBUF);
+		sc->vic_txbuf = NULL;
+	}
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		if (sc->vic_rxq[q].bufs != NULL) {
+			free(sc->vic_rxq[q].bufs, M_DEVBUF);
+			sc->vic_rxq[q].bufs = NULL;
+		}
+	}
+}
+
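+/*
+ * Callback for bus_dmamap_load(): the shared memory area is allocated
+ * as a single contiguous segment, so just record its bus address.
+ */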
+static void
+vic_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	bus_addr_t *baddr = arg;
+
+	if (error == 0)
+		*baddr = segs->ds_addr;
+}
+
+static int
+vic_alloc_dma(struct vic_softc *sc)
+{
+	device_t dev;
+	struct vic_rxbuf *rxb;
+	struct vic_txbuf *txb;
+	struct vic_rxqueue *rxq;
+	size_t size;
+	bus_size_t txmaxsz, txnsegs;
+	int q, i, error;
+
+	dev = sc->vic_dev;
+
+	/*
+	 * Calculate the size of all the structures shared with the
+	 * host. This allocation must be physically contiguous.
+	 */
+	size = sizeof(struct vic_data);
+	for (q = 0; q < VIC_NRXRINGS; q++)
+		size += sc->vic_rxq[q].nbufs * sizeof(struct vic_rxdesc);
+	size += sc->vic_tx_nbufs * sizeof(struct vic_txdesc);
+	sc->vic_dma_size = size;
+
+	error = bus_dma_tag_create(bus_get_dma_tag(dev),
+	    PAGE_SIZE, 0,		/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    sc->vic_dma_size,		/* maxsize */
+	    1,				/* nsegments */
+	    sc->vic_dma_size,		/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,		/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->vic_dma_tag);
+	if (error) {
+		device_printf(dev, "cannot create dma tag\n");
+		return (error);
+	}
+
+	error = bus_dmamem_alloc(sc->vic_dma_tag, (void **) &sc->vic_dma_kva,
+	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->vic_dma_map);
+	if (error) {
+		device_printf(dev, "cannot allocate dma memory\n");
+		return (error);
+	}
+
+	error = bus_dmamap_load(sc->vic_dma_tag, sc->vic_dma_map,
+	    sc->vic_dma_kva, sc->vic_dma_size, vic_dmamap_cb,
+	    &sc->vic_dma_paddr, BUS_DMA_NOWAIT);
+	if (error) {
+		device_printf(dev, "cannot load dmamap\n");
+		return (error);
+	}
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		rxq = &sc->vic_rxq[q];
+
+		error = bus_dma_tag_create(bus_get_dma_tag(dev),
+		    1, 0,			/* alignment, boundary */
+		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
+		    BUS_SPACE_MAXADDR,		/* highaddr */
+		    NULL, NULL,			/* filter, filterarg */
+		    rxq->pktlen,		/* maxsize */
+		    1,				/* nsegments */
+		    rxq->pktlen,		/* maxsegsize */
+		    0,				/* flags */
+		    NULL, NULL,			/* lockfunc, lockarg */
+		    &rxq->tag);
+		if (error) {
+			device_printf(dev,
+			    "cannot create Rx buffer tag for ring %d\n", q);
+			return (error);
+		}
+
+		error = bus_dmamap_create(rxq->tag, 0, &rxq->spare_dmamap);
+		if (error) {
+			device_printf(dev, "unable to create spare dmamap "
+			    "for ring %d\n", q);
+			return (error);
+		}
+
+		for (i = 0; i < rxq->nbufs; i++) {
+			rxb = &rxq->bufs[i];
+
+			error = bus_dmamap_create(rxq->tag, 0,
+			    &rxb->rxb_dmamap);
+			if (error) {
+				device_printf(dev, "unable to create dmamap "
+				    "for ring %d slot %d\n", q, i);
+				return (error);
+			}
+		}
+	}
+
+	if (sc->vic_flags & VIC_FLAGS_TSO) {
+		txmaxsz = VIC_TSO_MAXSIZE;
+		txnsegs = VIC_TSO_MAXSEGS;
+	} else {
+		txmaxsz = sc->vic_sg_max * VIC_TX_MAXSEGSIZE;
+		txnsegs = sc->vic_sg_max;
+	}
+
+	error = bus_dma_tag_create(bus_get_dma_tag(dev),
+	    1, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    txmaxsz,			/* maxsize */
+	    txnsegs,			/* nsegments */
+	    VIC_TX_MAXSEGSIZE,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->vic_tx_tag);
+	if (error) {
+		device_printf(dev, "unable to create Tx buffer tag\n");
+		return (error);
+	}
+
+	for (i = 0; i < sc->vic_tx_nbufs; i++) {
+		txb = &sc->vic_txbuf[i];
+
+		error = bus_dmamap_create(sc->vic_tx_tag, 0, &txb->txb_dmamap);
+		if (error) {
+			device_printf(dev,
+			    "unable to create dmamap for tx %d\n", i);
+			return (error);
+		}
+	}
+
+	return (0);
+}
+
+static void
+vic_free_dma(struct vic_softc *sc)
+{
+	struct vic_txbuf *txb;
+	struct vic_rxbuf *rxb;
+	struct vic_rxqueue *rxq;
+	int q, i;
+
+	if (sc->vic_tx_tag != NULL) {
+		for (i = 0; i < sc->vic_tx_nbufs; i++) {
+			txb = &sc->vic_txbuf[i];
+
+			if (txb->txb_dmamap != NULL) {
+				bus_dmamap_destroy(sc->vic_tx_tag,
+				    txb->txb_dmamap);
+				txb->txb_dmamap = NULL;
+			}
+		}
+
+		bus_dma_tag_destroy(sc->vic_tx_tag);
+		sc->vic_tx_tag = NULL;
+	}
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		rxq = &sc->vic_rxq[q];
+
+		if (rxq->tag == NULL)
+			continue;
+
+		if (rxq->spare_dmamap != NULL) {
+			bus_dmamap_destroy(rxq->tag, rxq->spare_dmamap);
+			rxq->spare_dmamap = NULL;
+		}
+
+		for (i = 0; i < rxq->nbufs; i++) {
+			rxb = &rxq->bufs[i];
+
+			if (rxb->rxb_dmamap != NULL) {
+				bus_dmamap_destroy(rxq->tag, rxb->rxb_dmamap);
+				rxb->rxb_dmamap = NULL;
+			}
+		}
+
+		bus_dma_tag_destroy(rxq->tag);
+		rxq->tag = NULL;
+	}
+
+	if (sc->vic_dma_tag != NULL) {
+		if (sc->vic_dma_map != NULL)
+			bus_dmamap_unload(sc->vic_dma_tag,
+			    sc->vic_dma_map);
+		if (sc->vic_dma_map != NULL && sc->vic_dma_kva != NULL)
+			bus_dmamem_free(sc->vic_dma_tag, sc->vic_dma_kva,
+			    sc->vic_dma_map);
+		sc->vic_dma_kva = NULL;
+		sc->vic_dma_map = NULL;
+
+		bus_dma_tag_destroy(sc->vic_dma_tag);
+		sc->vic_dma_tag = NULL;
+	}
+}
+
+static int
+vic_init_rings(struct vic_softc *sc)
+{
+	struct vic_rxqueue *rxq;
+	struct vic_txdesc *txd;
+	int q, i, error;
+
+	for (q = 0; q < VIC_NRXRINGS; q++) {
+		rxq = &sc->vic_rxq[q];
+
+		sc->vic_data->vd_rx[q].nextidx = 0;
+		sc->vic_data->vd_rx_saved_nextidx[q] = 0;
+
+		for (i = 0; i < rxq->nbufs; i++) {
+			error = vic_newbuf(sc, rxq, i);
+			if (error)
+				return (error);
+		}
+	}
+
+	for (i = 0; i < sc->vic_tx_nbufs; i++) {
+		txd = &sc->vic_txq[i];
+
+		txd->tx_flags = 0;
+		txd->tx_tsomss = 0;
+		txd->tx_owner = VIC_OWNER_DRIVER;
+	}
+
+	sc->vic_data->vd_tx_curidx = 0;
+	sc->vic_data->vd_tx_nextidx = 0;
+	sc->vic_data->vd_tx_stopped = 0;
+	sc->vic_data->vd_tx_queued = 0;
+	sc->vic_data->vd_tx_saved_nextidx = 0;
+
+	return (0);
+}
+
+static void
+vic_init_locked(struct vic_softc *sc)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->vic_ifp;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		return;
+
+	vic_stop(sc);
+
+	if (vic_init_rings(sc) != 0) {
+		vic_stop(sc);
+		return;
+	}
+
+	bus_dmamap_sync(sc->vic_dma_tag, sc->vic_dma_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	vic_write(sc, VIC_DATA_ADDR, sc->vic_dma_paddr);
+	vic_write(sc, VIC_DATA_LENGTH, sc->vic_dma_size);
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+	vic_set_rxfilter(sc);
+	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);
+
+	callout_reset(&sc->vic_tick, hz, vic_tick, sc);
+}
+
+static void
+vic_init(void *xsc)
+{
+	struct vic_softc *sc;
+
+	sc = xsc;
+
+	VIC_LOCK(sc);
+	vic_init_locked(sc);
+	VIC_UNLOCK(sc);
+}
+
+static int
+vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct vic_softc *sc;
+	struct ifreq *ifr;
+#if defined(INET) || defined(INET6)
+	struct ifaddr *ifa = (struct ifaddr *)data;
+	int avoid_reset = 0;
+#endif
+	int mask, error;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *) data;
+	error = 0;
+
+	switch (cmd) {
+	case SIOCSIFADDR:
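+		/*
+		 * Avoid resetting a running interface when an INET or
+		 * INET6 address is assigned; only ARP needs to be
+		 * (re)initialized.
+		 */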
+#ifdef INET
+		if (ifa->ifa_addr->sa_family == AF_INET)
+			avoid_reset = 1;
+#endif
+#ifdef INET6
+		if (ifa->ifa_addr->sa_family == AF_INET6)
+			avoid_reset = 1;
+#endif
+#if defined(INET) || defined(INET6)
+		if (avoid_reset != 0) {
+			ifp->if_flags |= IFF_UP;
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+				vic_init(sc);
+			if ((ifp->if_flags & IFF_NOARP) == 0)
+				arp_ifinit(ifp, ifa);
+		} else
+			error = ether_ioctl(ifp, cmd, data);
+#endif
+		break;
+
+	case SIOCSIFMTU:
+		VIC_LOCK(sc);
+		if (ifr->ifr_mtu < ETHERMIN)
+			error = EINVAL;
+		else if (ifr->ifr_mtu > ETHERMTU &&
+		    (sc->vic_flags & VIC_FLAGS_JUMBO) == 0)
+			error = EINVAL;
+		else if (ifr->ifr_mtu > VIC_JUMBO_MTU)
+			error = EINVAL;
+		else
+			ifp->if_mtu = ifr->ifr_mtu;
+		VIC_UNLOCK(sc);
+		break;
+
+	case SIOCSIFFLAGS:
+		VIC_LOCK(sc);
+		if (ifp->if_flags & IFF_UP) {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				if ((ifp->if_flags ^ sc->vic_if_flags) &
+				    (IFF_PROMISC | IFF_ALLMULTI))
+					vic_set_rxfilter(sc);
+			} else
+				vic_init_locked(sc);
+		} else {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				vic_stop(sc);
+		}
+		sc->vic_if_flags = ifp->if_flags;
+		VIC_UNLOCK(sc);
+		break;
+
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		VIC_LOCK(sc);
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+			vic_set_rxfilter(sc);
+		VIC_UNLOCK(sc);
+		break;
+
+	case SIOCGIFMEDIA:
+	case SIOCSIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->vic_media, cmd);
+		break;
+
+	case SIOCSIFCAP:
+		VIC_LOCK(sc);
+		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+		if ((mask & IFCAP_TXCSUM) != 0 &&
+		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
+			ifp->if_capenable ^= IFCAP_TXCSUM;
+			if (ifp->if_capenable & IFCAP_TXCSUM)
+				ifp->if_hwassist |= VIC_CSUM_FEATURES;
+			else
+				ifp->if_hwassist &= ~VIC_CSUM_FEATURES;
+		}
+
+		if ((mask & IFCAP_RXCSUM) != 0 &&
+		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
+			/*
+			 * We do not seem to be able to disable Rx
+			 * checksum offload on the host, but we can
+			 * simply ignore the checksum advice it
+			 * provides. Depending on VMware's behavior,
+			 * this could cause breakage when the traffic
+			 * source is a VM on the same host.
+			 */
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+		}
+
+		if ((mask & IFCAP_TSO4) != 0 &&
+		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
+			ifp->if_capenable ^= IFCAP_TSO4;
+			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
+				ifp->if_hwassist |= CSUM_TSO;
+			else
+				ifp->if_hwassist &= ~CSUM_TSO;
+		}
+
+		if ((mask & IFCAP_LRO) != 0 &&
+		    (ifp->if_capabilities & IFCAP_LRO) != 0) {
+			ifp->if_capenable ^= IFCAP_LRO;
+			if (ifp->if_capenable & IFCAP_LRO)
+				sc->vic_data->vd_features |=
+				    VIC_CMD_FEATURE_LPD;
+			else
+				sc->vic_data->vd_features &=
+				    ~VIC_CMD_FEATURE_LPD;
+		}
+
+		VIC_UNLOCK(sc);
+		VLAN_CAPABILITIES(ifp);
+		break;
+
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+static int
+vic_encap_load_mbuf(struct vic_softc *sc, struct mbuf **m0, int tso,
+    bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
+{
+	struct mbuf *m;
+	bus_dma_tag_t tag;
+	int maxsegs, error;
+
+	m = *m0;
+	tag = sc->vic_tx_tag;
+	maxsegs = tso ? VIC_TSO_MAXSEGS : sc->vic_sg_max;
+
+	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
+	if (error == 0) {
+		/*
+		 * When TSO is available, the Tx DMA map is set up to hold the
+		 * maximum possible packet size. But for non-TSO packets, we
+		 * don't want to have to chain Tx descriptors together.
+		 */
+		if (*nsegs <= maxsegs)
+			return (0);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***