Date:      Mon, 8 Jul 2013 00:36:43 +0000 (UTC)
From:      Bryan Venteicher <bryanv@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r253014 - in projects/vmxnet/sys: dev/vmware/vmxnet3 modules/vmware modules/vmware/vmxnet3
Message-ID:  <201307080036.r680ahKV099461@svn.freebsd.org>

Author: bryanv
Date: Mon Jul  8 00:36:42 2013
New Revision: 253014
URL: http://svnweb.freebsd.org/changeset/base/253014

Log:
  Initial port of the OpenBSD vmxnet3 driver
  
  Basic functionality appears to work, but a lot of work remains. So far
  this has only been tested against QEMU's vmxnet3 backend.

Added:
  projects/vmxnet/sys/dev/vmware/vmxnet3/
  projects/vmxnet/sys/dev/vmware/vmxnet3/if_vmx.c   (contents, props changed)
  projects/vmxnet/sys/dev/vmware/vmxnet3/if_vmxreg.h   (contents, props changed)
  projects/vmxnet/sys/dev/vmware/vmxnet3/if_vmxvar.h   (contents, props changed)
  projects/vmxnet/sys/modules/vmware/vmxnet3/
  projects/vmxnet/sys/modules/vmware/vmxnet3/Makefile   (contents, props changed)
Modified:
  projects/vmxnet/sys/modules/vmware/Makefile

Added: projects/vmxnet/sys/dev/vmware/vmxnet3/if_vmx.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/vmxnet/sys/dev/vmware/vmxnet3/if_vmx.c	Mon Jul  8 00:36:42 2013	(r253014)
@@ -0,0 +1,2047 @@
+/*-
+ * Copyright (c) 2013 Tsubai Masanari
+ * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
+ */
+
+/* Driver for VMware vmxnet3 virtual ethernet devices. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "if_vmxreg.h"
+#include "if_vmxvar.h"
+
+static int	vmxnet3_probe(device_t);
+static int	vmxnet3_attach(device_t);
+static int	vmxnet3_detach(device_t);
+static int	vmxnet3_shutdown(device_t);
+
+static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
+static void	vmxnet3_free_resources(struct vmxnet3_softc *);
+static int	vmxnet3_query(struct vmxnet3_softc *);
+
+static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
+static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
+static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
+static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
+static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
+static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
+static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
+static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
+static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
+static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
+static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
+static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
+static void	vmxnet3_free_data(struct vmxnet3_softc *);
+static int	vmxnet3_setup_interface(struct vmxnet3_softc *);
+
+static void	vmxnet3_evintr(struct vmxnet3_softc *);
+static void	vmxnet3_txeof(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
+static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
+static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
+static void	vmxnet3_rxeof(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+static void	vmxnet3_intr(void *);
+
+static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
+static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+static void	vmxnet3_stop(struct vmxnet3_softc *);
+
+static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
+static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+static int	vmxnet3_reset_queues(struct vmxnet3_softc *);
+static void	vmxnet3_init_locked(struct vmxnet3_softc *);
+static void	vmxnet3_init(void *);
+
+static int	vmxnet3_encap_offload_ctx(struct mbuf *, int *, int *, int *);
+static int	vmxnet3_encap_load_mbuf(struct vmxnet3_softc *,
+		    struct vmxnet3_txring *, struct mbuf **, bus_dmamap_t,
+		    bus_dma_segment_t [], int *);
+static int	vmxnet3_encap(struct vmxnet3_softc *, struct mbuf **);
+static void	vmxnet3_start_locked(struct ifnet *);
+static void	vmxnet3_start(struct ifnet *);
+
+static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
+static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
+static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
+
+static void	vmxnet3_watchdog(struct vmxnet3_softc *);
+static void	vmxnet3_tick(void *);
+static void	vmxnet3_link_state(struct vmxnet3_softc *);
+static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
+static int	vmxnet3_media_change(struct ifnet *);
+static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
+static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);
+
+static uint32_t	vmxnet3_read_bar0(struct vmxnet3_softc *, bus_size_t);
+static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
+		    uint32_t);
+static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
+static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
+		    uint32_t);
+static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
+static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
+
+static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
+static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
+static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
+static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
+
+static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
+		    bus_size_t, struct vmxnet3_dma_alloc *);
+static void	vmxnet3_dma_free(struct vmxnet3_softc *,
+		    struct vmxnet3_dma_alloc *);
+
+static device_method_t vmxnet3_methods[] = {
+	/* Device interface. */
+	DEVMETHOD(device_probe,		vmxnet3_probe),
+	DEVMETHOD(device_attach,	vmxnet3_attach),
+	DEVMETHOD(device_detach,	vmxnet3_detach),
+	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),
+
+	DEVMETHOD_END
+};
+
+static driver_t vmxnet3_driver = {
+	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
+};
+
+static devclass_t vmxnet3_devclass;
+DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
+
+MODULE_DEPEND(vmx, pci, 1, 1, 1);
+MODULE_DEPEND(vmx, ether, 1, 1, 1);
+
+#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
+#define VMXNET3_VMWARE_DEVICE_ID	0x07B0
+
+static int
+vmxnet3_probe(device_t dev)
+{
+
+	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
+	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
+		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
+		return (BUS_PROBE_DEFAULT);
+	}
+
+	return (ENXIO);
+}
+
+static int
+vmxnet3_attach(device_t dev)
+{
+	struct vmxnet3_softc *sc;
+	int error;
+
+	sc = device_get_softc(dev);
+	sc->vmx_dev = dev;
+
+	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
+	VMXNET3_RX_LOCK_INIT(sc, device_get_nameunit(dev));
+	VMXNET3_TX_LOCK_INIT(sc, device_get_nameunit(dev));
+	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
+
+	error = vmxnet3_alloc_resources(sc);
+	if (error)
+		goto fail;
+
+	error = vmxnet3_query(sc);
+	if (error)
+		goto fail;
+
+	error = vmxnet3_alloc_data(sc);
+	if (error)
+		goto fail;
+
+	error = vmxnet3_setup_interface(sc);
+	if (error)
+		goto fail;
+
+	error = bus_setup_intr(dev, sc->vmx_irq, INTR_TYPE_NET | INTR_MPSAFE,
+	    NULL, vmxnet3_intr, sc, &sc->vmx_intrhand);
+	if (error) {
+		ether_ifdetach(sc->vmx_ifp);
+		device_printf(dev, "could not set up interrupt\n");
+		goto fail;
+	}
+
+	vmxnet3_link_state(sc);
+
+fail:
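+	/*
+	 * All error paths above jump here; vmxnet3_detach() unwinds
+	 * whatever was allocated before the failure.
+	 */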
+	if (error)
+		vmxnet3_detach(dev);
+
+	return (error);
+}
+
+static int
+vmxnet3_detach(device_t dev)
+{
+	struct vmxnet3_softc *sc;
+	struct ifnet *ifp;	
+	struct ifnet *ifp;
+	sc = device_get_softc(dev);
+	ifp = sc->vmx_ifp;
+
+	if (device_is_attached(dev)) {
+		ether_ifdetach(ifp);
+		VMXNET3_CORE_LOCK(sc);
+		vmxnet3_stop(sc);
+		VMXNET3_CORE_UNLOCK(sc);
+		callout_drain(&sc->vmx_tick);
+	}
+
+	if (sc->vmx_intrhand != NULL) {
+		bus_teardown_intr(dev, sc->vmx_irq, sc->vmx_intrhand);
+		sc->vmx_intrhand = NULL;
+	}
+
+	if (ifp != NULL) {
+		if_free(ifp);
+		sc->vmx_ifp = NULL;
+	}
+
+	if (sc->vmx_irq != NULL) {
+		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vmx_irq);
+		sc->vmx_irq = NULL;
+	}
+
+	vmxnet3_free_data(sc);
+	vmxnet3_free_resources(sc);
+
+	VMXNET3_CORE_LOCK_DESTROY(sc);
+	VMXNET3_TX_LOCK_DESTROY(sc);
+	VMXNET3_RX_LOCK_DESTROY(sc);
+
+	return (0);
+}
+
+static int
+vmxnet3_shutdown(device_t dev)
+{
+
+	return (0);
+}
+
+static int
+vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	int rid;
+
+	dev = sc->vmx_dev;
+
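+	/* Map the two memory BARs and allocate a shared legacy interrupt. */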
+	rid = PCIR_BAR(0);
+	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE);
+	if (sc->vmx_res0 == NULL) {
+		device_printf(dev,
+		    "could not map BAR0 memory\n");
+		return (ENXIO);
+	}
+
+	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
+	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
+
+	rid = PCIR_BAR(1);
+	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+	    RF_ACTIVE);
+	if (sc->vmx_res1 == NULL) {
+		device_printf(dev,
+		    "could not map BAR1 memory\n");
+		return (ENXIO);
+	}
+
+	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
+	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
+
+	rid = 0;
+	sc->vmx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+	   RF_SHAREABLE | RF_ACTIVE);
+	if (sc->vmx_irq == NULL) {
+		device_printf(dev, "could not allocate interrupt resource\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+static void
+vmxnet3_free_resources(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	int rid;
+
+	dev = sc->vmx_dev;
+
+	if (sc->vmx_res0 != NULL) {
+		rid = PCIR_BAR(0);
+		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
+		sc->vmx_res0 = NULL;
+	}
+
+	if (sc->vmx_res1 != NULL) {
+		rid = PCIR_BAR(1);
+		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
+		sc->vmx_res1 = NULL;
+	}
+}
+
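+/*
+ * Verify that the device supports revision 1 of both the VMXNET3 and UPT
+ * interfaces, select those revisions, and read the MAC address.
+ */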
+static int
+vmxnet3_query(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	uint32_t version;
+
+	dev = sc->vmx_dev;
+
+	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
+	if ((version & 0x01) == 0) {
+		device_printf(dev, "unsupported hardware version %#x\n",
+		    version);
+		return (ENOTSUP);
+	}
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
+
+	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
+	if ((version & 0x01) == 0) {
+		device_printf(dev, "unsupported UPT version %#x\n", version);
+		return (ENOTSUP);
+	}
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
+
+	vmxnet3_get_lladdr(sc);
+
+	return (0);
+}
+
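+/*
+ * Allocate the driver/device shared area and a single DMA region that is
+ * carved up into the per-queue shared structures.
+ */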
+static int
+vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	uint8_t *kva;
+	size_t size;
+	int i, error;
+
+	dev = sc->vmx_dev;
+
+	size = sizeof(struct vmxnet3_driver_shared);
+	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
+	if (error) {
+		device_printf(dev, "cannot alloc shared memory\n");
+		return (error);
+	}
+	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
+
+	size  = VMXNET3_TX_QUEUES * sizeof(struct vmxnet3_txq_shared);
+	size += VMXNET3_RX_QUEUES * sizeof(struct vmxnet3_rxq_shared);
+	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
+	if (error) {
+		device_printf(dev, "cannot alloc queue shared memory\n");
+		return (error);
+	}
+	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
+	kva = sc->vmx_qs;
+
+	for (i = 0; i < VMXNET3_TX_QUEUES; i++) {
+		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
+		kva += sizeof(struct vmxnet3_txq_shared);
+	}
+	for (i = 0; i < VMXNET3_RX_QUEUES; i++) {
+		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
+		kva += sizeof(struct vmxnet3_rxq_shared);
+	}
+
+	return (0);
+}
+
+static void
+vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
+{
+
+	if (sc->vmx_qs != NULL) {
+		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
+		sc->vmx_qs = NULL;
+	}
+
+	if (sc->vmx_ds != NULL) {
+		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
+		sc->vmx_ds = NULL;
+	}
+}
+
+static int
+vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct vmxnet3_txqueue *txq;
+	struct vmxnet3_txring *txr;
+	struct vmxnet3_comp_ring *txc;
+	size_t descsz, compsz;
+	int i, q, error;
+
+	dev = sc->vmx_dev;
+	descsz = VMXNET3_TX_NDESC * sizeof(struct vmxnet3_txdesc);
+	compsz = VMXNET3_TX_NCOMPDESC * sizeof(struct vmxnet3_txcompdesc);
+
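+	/*
+	 * For each Tx queue: create a DMA tag for mbuf payloads, allocate
+	 * the command and completion descriptor rings, and create a DMA
+	 * map for every command descriptor slot.
+	 */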
+	for (q = 0; q < VMXNET3_TX_QUEUES; q++) {
+		txq = &sc->vmx_txq[q];
+		txr = &txq->vxtxq_cmd_ring;
+		txc = &txq->vxtxq_comp_ring;
+
+		error = bus_dma_tag_create(bus_get_dma_tag(dev),
+		    1, 0,			/* alignment, boundary */
+		    BUS_SPACE_MAXADDR,		/* lowaddr */
+		    BUS_SPACE_MAXADDR,		/* highaddr */
+		    NULL, NULL,			/* filter, filterarg */
+		    VMXNET3_TSO_MAXSIZE,	/* maxsize */
+		    VMXNET3_TX_MAXSEGS,		/* nsegments */
+		    PAGE_SIZE,			/* maxsegsize */
+		    0,				/* flags */
+		    NULL, NULL,			/* lockfunc, lockarg */
+		    &txr->vxtxr_txtag);
+		if (error) {
+			device_printf(dev,
+			    "unable to create Tx buffer tag for queue %d\n", q);
+			return (error);
+		}
+
+		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
+		if (error) {
+			device_printf(dev, "cannot alloc Tx descriptors for "
+			    "queue %d error %d\n", q, error);
+			return (error);
+		}
+		txr->vxtxr_txd =
+		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
+
+		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
+		if (error) {
+			device_printf(dev, "cannot alloc Tx comp descriptors "
+			   "for queue %d error %d\n", q, error);
+			return (error);
+		}
+		txc->vxcr_u.txcd =
+		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
+
+		for (i = 0; i < VMXNET3_TX_NDESC; i++) {
+			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
+			    &txr->vxtxr_dmap[i]);
+			if (error) {
+				device_printf(dev, "unable to create Tx buf "
+				    "dmamap for queue %d idx %d\n", q, i);
+				return (error);
+			}
+		}
+	}
+
+	return (0);
+}
+
+static void
+vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct vmxnet3_txqueue *txq;
+	struct vmxnet3_txring *txr;
+	struct vmxnet3_comp_ring *txc;
+	int i, q;
+
+	dev = sc->vmx_dev;
+
+	for (q = 0; q < VMXNET3_TX_QUEUES; q++) {
+		txq = &sc->vmx_txq[q];
+		txr = &txq->vxtxq_cmd_ring;
+		txc = &txq->vxtxq_comp_ring;
+
+		for (i = 0; i < VMXNET3_TX_NDESC; i++) {
+			if (txr->vxtxr_dmap[i] != NULL) {
+				bus_dmamap_destroy(txr->vxtxr_txtag,
+				    txr->vxtxr_dmap[i]);
+				txr->vxtxr_dmap[i] = NULL;
+			}
+		}
+
+		if (txc->vxcr_u.txcd != NULL) {
+			vmxnet3_dma_free(sc, &txc->vxcr_dma);
+			txc->vxcr_u.txcd = NULL;
+		}
+
+		if (txr->vxtxr_txd != NULL) {
+			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
+			txr->vxtxr_txd = NULL;
+		}
+
+		if (txr->vxtxr_txtag != NULL) {
+			bus_dma_tag_destroy(txr->vxtxr_txtag);
+			txr->vxtxr_txtag = NULL;
+		}
+	}
+}
+
+static int
+vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct vmxnet3_rxqueue *rxq;
+	struct vmxnet3_rxring *rxr;
+	struct vmxnet3_comp_ring *rxc;
+	int descsz, compsz;
+	int i, j, q, error;
+
+	dev = sc->vmx_dev;
+	descsz = VMXNET3_RX_NDESC * sizeof(struct vmxnet3_rxdesc);
+	compsz = VMXNET3_RX_NCOMPDESC * sizeof(struct vmxnet3_rxcompdesc);
+
+	for (q = 0; q < VMXNET3_RX_QUEUES; q++) {
+		rxq = &sc->vmx_rxq[q];
+		rxc = &rxq->vxrxq_comp_ring;
+
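+		/* Each Rx queue is backed by two command (fill) rings. */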
+		for (i = 0; i < 2; i++) {
+			rxr = &rxq->vxrxq_cmd_ring[i];
+
+			error = bus_dma_tag_create(bus_get_dma_tag(dev),
+			    1, 0,		/* alignment, boundary */
+			    BUS_SPACE_MAXADDR,	/* lowaddr */
+			    BUS_SPACE_MAXADDR,	/* highaddr */
+			    NULL, NULL,		/* filter, filterarg */
+			    MJUMPAGESIZE,	/* maxsize */
+			    1,			/* nsegments */
+			    MJUMPAGESIZE,	/* maxsegsize */
+			    0,			/* flags */
+			    NULL, NULL,		/* lockfunc, lockarg */
+			    &rxr->vxrxr_rxtag);
+			if (error) {
+				device_printf(dev,
+				    "unable to create Rx buffer tag for queue %d\n", q);
+				return (error);
+			}
+
+			error = vmxnet3_dma_malloc(sc, descsz, 512,
+			    &rxr->vxrxr_dma);
+			if (error) {
+				device_printf(dev, "cannot allocate Rx "
+				    "descriptors for queue %d/%d error %d\n",
+				    q, i, error);
+				return (error);
+			}
+			rxr->vxrxr_rxd =
+			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
+		}
+
+		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
+		if (error) {
+			device_printf(dev, "cannot alloc Rx comp descriptors "
+			    "for queue %d error %d\n", q, error);
+			return (error);
+		}
+		rxc->vxcr_u.rxcd =
+		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
+
+		for (i = 0; i < 2; i++) {
+			rxr = &rxq->vxrxq_cmd_ring[i];
+
+			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
+			    &rxr->vxrxr_spare_dmap);
+			if (error) {
+				device_printf(dev, "unable to create spare "
+				    "dmamap for queue %d/%d error %d\n",
+				    q, i, error);
+				return (error);
+			}
+
+			for (j = 0; j < VMXNET3_RX_NDESC; j++) {
+				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
+				    &rxr->vxrxr_dmap[j]);
+				if (error) {
+					device_printf(dev, "unable to create "
+					    "dmamap for queue %d/%d slot %d "
+					    "error %d\n",
+					    q, i, j, error);
+					return (error);
+				}
+			}
+		}
+	}
+
+	return (0);
+}
+
+static void
+vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct vmxnet3_rxqueue *rxq;
+	struct vmxnet3_rxring *rxr;
+	struct vmxnet3_comp_ring *rxc;
+	int i, j, q;
+
+	dev = sc->vmx_dev;
+
+	for (q = 0; q < VMXNET3_RX_QUEUES; q++) {
+		rxq = &sc->vmx_rxq[q];
+		rxc = &rxq->vxrxq_comp_ring;
+
+		for (i = 0; i < 2; i++) {
+			rxr = &rxq->vxrxq_cmd_ring[i];
+
+			if (rxr->vxrxr_spare_dmap != NULL) {
+				bus_dmamap_destroy(rxr->vxrxr_rxtag,
+				    rxr->vxrxr_spare_dmap);
+				rxr->vxrxr_spare_dmap = NULL;
+			}
+
+			for (j = 0; j < VMXNET3_RX_NDESC; j++) {
+				if (rxr->vxrxr_dmap[j] != NULL) {
+					bus_dmamap_destroy(rxr->vxrxr_rxtag,
+					    rxr->vxrxr_dmap[j]);
+					rxr->vxrxr_dmap[j] = NULL;
+				}
+			}
+		}
+
+		if (rxc->vxcr_u.rxcd != NULL) {
+			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
+			rxc->vxcr_u.rxcd = NULL;
+		}
+
+		for (i = 0; i < 2; i++) {
+			rxr = &rxq->vxrxq_cmd_ring[i];
+
+			if (rxr->vxrxr_rxd != NULL) {
+				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
+				rxr->vxrxr_rxd = NULL;
+			}
+
+			if (rxr->vxrxr_rxtag != NULL) {
+				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
+				rxr->vxrxr_rxtag = NULL;
+			}
+		}
+	}
+}
+
+static int
+vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
+{
+	int error;
+
+	error = vmxnet3_alloc_txq_data(sc);
+	if (error)
+		return (error);
+
+	error = vmxnet3_alloc_rxq_data(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static void
+vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
+{
+
+	vmxnet3_free_rxq_data(sc);
+	vmxnet3_free_txq_data(sc);
+}
+
+static int
+vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
+{
+	int error;
+
+	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
+	    32, &sc->vmx_mcast_dma);
+	if (error)
+		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
+	else
+		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
+
+	return (error);
+}
+
+static void
+vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
+{
+
+	if (sc->vmx_mcast != NULL) {
+		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
+		sc->vmx_mcast = NULL;
+	}
+}
+
+static void
+vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
+{
+	struct vmxnet3_driver_shared *ds;
+	struct vmxnet3_txqueue *txq;
+	struct vmxnet3_txq_shared *txs;
+	struct vmxnet3_rxqueue *rxq;
+	struct vmxnet3_rxq_shared *rxs;
+	u_int major, minor, release_code, rev;
+	int i;
+
+	ds = sc->vmx_ds;
+	ds->magic = VMXNET3_REV1_MAGIC;
+	ds->version = VMXNET3_DRIVER_VERSION;
+
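+	/*
+	 * Encode the running FreeBSD version and guest word size into the
+	 * guest information field expected by the device.
+	 */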
+	major = __FreeBSD_version / 100000;
+	minor = (__FreeBSD_version / 1000) % 100;
+	release_code = (__FreeBSD_version / 100) % 10;
+	rev = __FreeBSD_version % 100;
+	ds->guest = release_code << 30 | rev << 22 | major << 14 | minor << 6 |
+	    VMXNET3_GOS_FREEBSD;
+#ifdef __LP64__
+	ds->guest |= VMXNET3_GOS_64BIT;
+#else
+	ds->guest |= VMXNET3_GOS_32BIT;
+#endif
+
+	ds->vmxnet3_revision = 1;
+	ds->upt_version = 1;
+	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
+	ds->driver_data = vtophys(sc);
+	ds->driver_data_len = sizeof(struct vmxnet3_softc);
+	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
+	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
+	ds->mtu = ETHERMTU;
+	ds->ntxqueue = VMXNET3_TX_QUEUES;
+	ds->nrxqueue = VMXNET3_RX_QUEUES;
+	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
+	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
+	ds->automask = 1;
+	ds->nintr = VMXNET3_NINTR;
+	ds->evintr = 0;
+	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
+	for (i = 0; i < VMXNET3_NINTR; i++)
+		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
+
+	for (i = 0; i < VMXNET3_TX_QUEUES; i++) {
+		txq = &sc->vmx_txq[i];
+		txs = txq->vxtxq_ts;
+
+		txs->npending = 0;
+		txs->intr_threshold = 1;
+		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
+		txs->cmd_ring_len = VMXNET3_TX_NDESC;
+		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
+		txs->comp_ring_len = VMXNET3_TX_NCOMPDESC;
+		txs->driver_data = vtophys(txq);
+		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
+		txs->intr_idx = 0;
+		txs->stopped = 1;
+		txs->error = 0;
+	}
+
+	for (i = 0; i < VMXNET3_RX_QUEUES; i++) {
+		rxq = &sc->vmx_rxq[i];
+		rxs = rxq->vxrxq_rs;
+
+		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
+		rxs->cmd_ring_len[0] = VMXNET3_RX_NDESC;
+		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
+		rxs->cmd_ring_len[1] = VMXNET3_RX_NDESC;
+		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
+		rxs->comp_ring_len = VMXNET3_RX_NCOMPDESC;
+		rxs->driver_data = vtophys(rxq);
+		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
+		rxs->intr_idx = 0;
+		rxs->stopped = 1;
+		rxs->error = 0;
+	}
+
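+	/* Hand the device the 64-bit physical address of the shared area. */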
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
+	    sc->vmx_ds_dma.dma_paddr >> 32);
+}
+
+static void
+vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
+{
+
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
+	    sc->vmx_ds_dma.dma_paddr >> 32);
+}
+
+static int
+vmxnet3_alloc_data(struct vmxnet3_softc *sc)
+{
+	int error;
+
+	error = vmxnet3_alloc_shared_data(sc);
+	if (error)
+		return (error);
+
+	error = vmxnet3_alloc_queue_data(sc);
+	if (error)
+		return (error);
+
+	error = vmxnet3_alloc_mcast_table(sc);
+	if (error)
+		return (error);
+
+	vmxnet3_init_shared_data(sc);
+
+	return (0);
+}
+
+static void
+vmxnet3_free_data(struct vmxnet3_softc *sc)
+{
+
+	vmxnet3_free_mcast_table(sc);
+	vmxnet3_free_queue_data(sc);
+	vmxnet3_free_shared_data(sc);
+}
+
+static int
+vmxnet3_setup_interface(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vmx_dev;
+
+	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "cannot allocate ifnet structure\n");
+		return (ENOSPC);
+	}
+
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	if_initbaudrate(ifp, IF_Gbps(10)); /* Approx. */
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_init = vmxnet3_init;
+	ifp->if_ioctl = vmxnet3_ioctl;
+	ifp->if_start = vmxnet3_start;
+	ifp->if_snd.ifq_drv_maxlen = VMXNET3_TX_NDESC - 1;
+	IFQ_SET_MAXLEN(&ifp->if_snd, VMXNET3_TX_NDESC - 1);
+	IFQ_SET_READY(&ifp->if_snd);
+
+	ether_ifattach(ifp, sc->vmx_lladdr);
+
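+	/* Advertise capabilities matching the UPT features set in the shared area. */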
+	if (sc->vmx_ds->upt_features & UPT1_F_VLAN)
+		ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
+	if (sc->vmx_ds->upt_features & UPT1_F_CSUM) {
+		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
+		ifp->if_hwassist |= VMXNET3_CSUM_FEATURES;
+	}
+
+	ifp->if_capenable = ifp->if_capabilities;
+
+	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
+	    vmxnet3_media_status);
+	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
+
+	return (0);
+}
+
+static void
+vmxnet3_evintr(struct vmxnet3_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+	struct vmxnet3_txq_shared *ts;
+	struct vmxnet3_rxq_shared *rs;
+	uint32_t event;
+	int reset;
+
+	dev = sc->vmx_dev;
+	ifp = sc->vmx_ifp;
+	event = sc->vmx_ds->event;
+	reset = 0;
+
+	/* Clear events. */
+	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
+
+	if (event & VMXNET3_EVENT_LINK)
+		vmxnet3_link_state(sc);
+
+	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
+		reset = 1;
+		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
+		ts = sc->vmx_txq[0].vxtxq_ts;
+		if (ts->stopped != 0)
+			device_printf(dev, "TX queue error %#x\n", ts->error);
+		rs = sc->vmx_rxq[0].vxrxq_rs;
+		if (rs->stopped != 0)
+			device_printf(dev, "RX queue error %#x\n", rs->error);
+	}
+
+	if (event & VMXNET3_EVENT_DIC)
+		device_printf(dev, "device implementation change event\n");
+	if (event & VMXNET3_EVENT_DEBUG)
+		device_printf(dev, "debug event\n");
+
+	if (reset != 0) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vmxnet3_init(sc);
+	}
+}
+
+static void
+vmxnet3_txeof(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
+{
+	struct ifnet *ifp;
+	struct vmxnet3_txring *txr;
+	struct vmxnet3_comp_ring *txc;
+	struct vmxnet3_txcompdesc *txcd;
+	u_int sop;
+
+	ifp = sc->vmx_ifp;
+	txr = &txq->vxtxq_cmd_ring;
+	txc = &txq->vxtxq_comp_ring;
+
+	VMXNET3_TX_LOCK_ASSERT(sc);
+
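+	/*
+	 * Process the completion ring until the generation bit no longer
+	 * matches, reclaiming the mbuf and DMA map of each completed
+	 * transmit descriptor.
+	 */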
+	for (;;) {
+		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
+		if (txcd->gen != txc->vxcr_gen)
+			break;
+
+		if (++txc->vxcr_next == VMXNET3_TX_NCOMPDESC) {
+			txc->vxcr_next = 0;
+			txc->vxcr_gen ^= 1;
+		}
+
+		sop = txr->vxtxr_next;
+		if (txr->vxtxr_m[sop] != NULL) {
+			bus_dmamap_sync(txr->vxtxr_txtag, txr->vxtxr_dmap[sop],
+				BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(txr->vxtxr_txtag,
+			    txr->vxtxr_dmap[sop]);
+
+			m_freem(txr->vxtxr_m[sop]);
+			txr->vxtxr_m[sop] = NULL;
+
+			ifp->if_opackets++;
+		}
+
+		txr->vxtxr_next = (txcd->eop_idx + 1) % VMXNET3_TX_NDESC;
+	}
+
+	if (txr->vxtxr_head == txr->vxtxr_next)
+		sc->vmx_watchdog_timer = 0;
+}
+
+static void
+vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
+{
+
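+	/* Translate the completion descriptor's checksum bits into mbuf flags. */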
+	if (rxcd->ipv4 && rxcd->ipcsum_ok)
+		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
+	if (rxcd->fragment)
+		return;
+	if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
+		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+		m->m_pkthdr.csum_data = 0xFFFF;
+	}
+}
+
+static int
+vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
+{
+	struct ifnet *ifp;
+	struct mbuf *m;
+	struct vmxnet3_rxdesc *rxd;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


