Date:      Tue, 19 Jul 2016 07:51:23 +0000 (UTC)
From:      Sepherosa Ziehau <sephe@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r303023 - in head/sys: conf dev/hyperv/vmbus modules/hyperv/vmbus
Message-ID:  <201607190751.u6J7pNSv063907@repo.freebsd.org>

Author: sephe
Date: Tue Jul 19 07:51:22 2016
New Revision: 303023
URL: https://svnweb.freebsd.org/changeset/base/303023

Log:
  hyperv/vmbus: Rename laundered vmbus channel code
  
  MFC after:	1 week
  Sponsored by:	Microsoft OSTC
  Differential Revision:	https://reviews.freebsd.org/D7232

Added:
  head/sys/dev/hyperv/vmbus/vmbus_chan.c
     - copied unchanged from r303022, head/sys/dev/hyperv/vmbus/hv_channel.c
Deleted:
  head/sys/dev/hyperv/vmbus/hv_channel.c
Modified:
  head/sys/conf/files.amd64
  head/sys/conf/files.i386
  head/sys/modules/hyperv/vmbus/Makefile

Modified: head/sys/conf/files.amd64
==============================================================================
--- head/sys/conf/files.amd64	Tue Jul 19 06:04:44 2016	(r303022)
+++ head/sys/conf/files.amd64	Tue Jul 19 07:51:22 2016	(r303023)
@@ -270,11 +270,11 @@ dev/hyperv/utilities/hv_kvp.c				optiona
 dev/hyperv/utilities/hv_shutdown.c			optional	hyperv
 dev/hyperv/utilities/hv_timesync.c			optional	hyperv
 dev/hyperv/utilities/hv_util.c				optional	hyperv
-dev/hyperv/vmbus/hv_channel.c				optional	hyperv
 dev/hyperv/vmbus/hv_ring_buffer.c			optional	hyperv
 dev/hyperv/vmbus/hyperv.c				optional	hyperv
 dev/hyperv/vmbus/hyperv_busdma.c			optional	hyperv
 dev/hyperv/vmbus/vmbus.c				optional	hyperv
+dev/hyperv/vmbus/vmbus_chan.c				optional	hyperv
 dev/hyperv/vmbus/vmbus_et.c				optional	hyperv
 dev/hyperv/vmbus/vmbus_if.m				optional	hyperv
 dev/hyperv/vmbus/amd64/hyperv_machdep.c			optional	hyperv

Modified: head/sys/conf/files.i386
==============================================================================
--- head/sys/conf/files.i386	Tue Jul 19 06:04:44 2016	(r303022)
+++ head/sys/conf/files.i386	Tue Jul 19 07:51:22 2016	(r303023)
@@ -246,11 +246,11 @@ dev/hyperv/utilities/hv_kvp.c				optiona
 dev/hyperv/utilities/hv_shutdown.c			optional	hyperv
 dev/hyperv/utilities/hv_timesync.c			optional	hyperv
 dev/hyperv/utilities/hv_util.c				optional	hyperv
-dev/hyperv/vmbus/hv_channel.c				optional	hyperv
 dev/hyperv/vmbus/hv_ring_buffer.c			optional	hyperv
 dev/hyperv/vmbus/hyperv.c				optional	hyperv
 dev/hyperv/vmbus/hyperv_busdma.c			optional	hyperv
 dev/hyperv/vmbus/vmbus.c				optional	hyperv
+dev/hyperv/vmbus/vmbus_chan.c				optional	hyperv
 dev/hyperv/vmbus/vmbus_et.c				optional	hyperv
 dev/hyperv/vmbus/vmbus_if.m				optional	hyperv
 dev/hyperv/vmbus/i386/hyperv_machdep.c			optional	hyperv

Copied: head/sys/dev/hyperv/vmbus/vmbus_chan.c (from r303022, head/sys/dev/hyperv/vmbus/hv_channel.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/hyperv/vmbus/vmbus_chan.c	Tue Jul 19 07:51:22 2016	(r303023, copy of r303022, head/sys/dev/hyperv/vmbus/hv_channel.c)
@@ -0,0 +1,1380 @@
+/*-
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <dev/hyperv/include/hyperv_busdma.h>
+#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
+#include <dev/hyperv/vmbus/hyperv_var.h>
+#include <dev/hyperv/vmbus/vmbus_reg.h>
+#include <dev/hyperv/vmbus/vmbus_var.h>
+
+static void 	vmbus_chan_signal_tx(struct hv_vmbus_channel *chan);
+static void	vmbus_chan_update_evtflagcnt(struct vmbus_softc *,
+		    const struct hv_vmbus_channel *);
+
+static void	vmbus_chan_task(void *, int);
+static void	vmbus_chan_task_nobatch(void *, int);
+static void	vmbus_chan_detach_task(void *, int);
+
+static void	vmbus_chan_msgproc_choffer(struct vmbus_softc *,
+		    const struct vmbus_message *);
+static void	vmbus_chan_msgproc_chrescind(struct vmbus_softc *,
+		    const struct vmbus_message *);
+
+/*
+ * Vmbus channel message processing.
+ */
+static const vmbus_chanmsg_proc_t
+vmbus_chan_msgprocs[VMBUS_CHANMSG_TYPE_MAX] = {
+	VMBUS_CHANMSG_PROC(CHOFFER,	vmbus_chan_msgproc_choffer),
+	VMBUS_CHANMSG_PROC(CHRESCIND,	vmbus_chan_msgproc_chrescind),
+
+	VMBUS_CHANMSG_PROC_WAKEUP(CHOPEN_RESP),
+	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_CONNRESP),
+	VMBUS_CHANMSG_PROC_WAKEUP(GPADL_DISCONNRESP)
+};
+
+/**
+ *  @brief Trigger an event notification on the specified channel
+ */
+static void
+vmbus_chan_signal_tx(struct hv_vmbus_channel *chan)
+{
+	struct vmbus_softc *sc = chan->vmbus_sc;
+	uint32_t chanid = chan->ch_id;
+
+	atomic_set_long(&sc->vmbus_tx_evtflags[chanid >> VMBUS_EVTFLAG_SHIFT],
+	    1UL << (chanid & VMBUS_EVTFLAG_MASK));
+
+	if (chan->ch_flags & VMBUS_CHAN_FLAG_HASMNF) {
+		atomic_set_int(
+		&sc->vmbus_mnf2->mnf_trigs[chan->ch_montrig_idx].mt_pending,
+		chan->ch_montrig_mask);
+	} else {
+		hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
+	}
+}
+
+static int
+vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS)
+{
+	struct hv_vmbus_channel *chan = arg1;
+	int mnf = 0;
+
+	if (chan->ch_flags & VMBUS_CHAN_FLAG_HASMNF)
+		mnf = 1;
+	return sysctl_handle_int(oidp, &mnf, 0, req);
+}
+
+static void
+vmbus_chan_sysctl_create(struct hv_vmbus_channel *chan)
+{
+	struct sysctl_oid *ch_tree, *chid_tree, *br_tree;
+	struct sysctl_ctx_list *ctx;
+	uint32_t ch_id;
+	char name[16];
+
+	/*
+	 * Add sysctl nodes related to this channel to this
+	 * channel's sysctl ctx, so that they can be destroyed
+	 * independently upon close of this channel, which can
+	 * happen even if the device is not detached.
+	 */
+	ctx = &chan->ch_sysctl_ctx;
+	sysctl_ctx_init(ctx);
+
+	/*
+	 * Create dev.NAME.UNIT.channel tree.
+	 */
+	ch_tree = SYSCTL_ADD_NODE(ctx,
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(chan->ch_dev)),
+	    OID_AUTO, "channel", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+	if (ch_tree == NULL)
+		return;
+
+	/*
+	 * Create dev.NAME.UNIT.channel.CHANID tree.
+	 */
+	if (VMBUS_CHAN_ISPRIMARY(chan))
+		ch_id = chan->ch_id;
+	else
+		ch_id = chan->ch_prichan->ch_id;
+	snprintf(name, sizeof(name), "%d", ch_id);
+	chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
+	    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+	if (chid_tree == NULL)
+		return;
+
+	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
+		/*
+		 * Create dev.NAME.UNIT.channel.CHANID.sub tree.
+		 */
+		ch_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree),
+		    OID_AUTO, "sub", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+		if (ch_tree == NULL)
+			return;
+
+		/*
+		 * Create dev.NAME.UNIT.channel.CHANID.sub.SUBIDX tree.
+		 *
+		 * NOTE:
+		 * chid_tree is changed to this new sysctl tree.
+		 */
+		snprintf(name, sizeof(name), "%d", chan->ch_subidx);
+		chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
+		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+		if (chid_tree == NULL)
+			return;
+
+		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
+		    "chanid", CTLFLAG_RD, &chan->ch_id, 0, "channel id");
+	}
+
+	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
+	    "cpu", CTLFLAG_RD, &chan->ch_cpuid, 0, "owner CPU id");
+	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
+	    "mnf", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
+	    chan, 0, vmbus_chan_sysctl_mnf, "I",
+	    "has monitor notification facilities");
+
+	/*
+	 * Create sysctl tree for RX bufring.
+	 */
+	br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
+	    "in", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+	if (br_tree != NULL) {
+		hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(br_tree),
+		    &chan->inbound, "inbound ring buffer stats");
+	}
+
+	/*
+	 * Create sysctl tree for TX bufring.
+	 */
+	br_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
+	    "out", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+	if (br_tree != NULL) {
+		hv_ring_buffer_stat(ctx, SYSCTL_CHILDREN(br_tree),
+		    &chan->outbound, "outbound ring buffer stats");
+	}
+}
+
+int
+vmbus_chan_open(struct hv_vmbus_channel *chan, int txbr_size, int rxbr_size,
+    const void *udata, int udlen, vmbus_chan_callback_t cb, void *cbarg)
+{
+	struct vmbus_softc *sc = chan->vmbus_sc;
+	const struct vmbus_chanmsg_chopen_resp *resp;
+	const struct vmbus_message *msg;
+	struct vmbus_chanmsg_chopen *req;
+	struct vmbus_msghc *mh;
+	uint32_t status;
+	int error;
+	uint8_t *br;
+
+	if (udlen > VMBUS_CHANMSG_CHOPEN_UDATA_SIZE) {
+		device_printf(sc->vmbus_dev,
+		    "invalid udata len %d for chan%u\n", udlen, chan->ch_id);
+		return EINVAL;
+	}
+	KASSERT((txbr_size & PAGE_MASK) == 0,
+	    ("send bufring size is not a multiple of page size"));
+	KASSERT((rxbr_size & PAGE_MASK) == 0,
+	    ("recv bufring size is not a multiple of page size"));
+
+	if (atomic_testandset_int(&chan->ch_stflags,
+	    VMBUS_CHAN_ST_OPENED_SHIFT))
+		panic("double-open chan%u", chan->ch_id);
+
+	chan->ch_cb = cb;
+	chan->ch_cbarg = cbarg;
+
+	vmbus_chan_update_evtflagcnt(sc, chan);
+
+	chan->ch_tq = VMBUS_PCPU_GET(chan->vmbus_sc, event_tq, chan->ch_cpuid);
+	if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
+		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task, chan);
+	else
+		TASK_INIT(&chan->ch_task, 0, vmbus_chan_task_nobatch, chan);
+
+	/*
+	 * Allocate the TX+RX bufrings.
+	 * XXX should use ch_dev dtag
+	 */
+	br = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
+	    PAGE_SIZE, 0, txbr_size + rxbr_size, &chan->ch_bufring_dma,
+	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
+	if (br == NULL) {
+		device_printf(sc->vmbus_dev, "bufring allocation failed\n");
+		error = ENOMEM;
+		goto failed;
+	}
+	chan->ch_bufring = br;
+
+	/* TX bufring comes first */
+	hv_vmbus_ring_buffer_init(&chan->outbound, br, txbr_size);
+	/* RX bufring immediately follows TX bufring */
+	hv_vmbus_ring_buffer_init(&chan->inbound, br + txbr_size, rxbr_size);
+
+	/* Create sysctl tree for this channel */
+	vmbus_chan_sysctl_create(chan);
+
+	/*
+	 * Connect the bufrings, both RX and TX, to this channel.
+	 */
+	error = vmbus_chan_gpadl_connect(chan, chan->ch_bufring_dma.hv_paddr,
+	    txbr_size + rxbr_size, &chan->ch_bufring_gpadl);
+	if (error) {
+		device_printf(sc->vmbus_dev,
+		    "failed to connect bufring GPADL to chan%u\n", chan->ch_id);
+		goto failed;
+	}
+
+	/*
+	 * Open channel w/ the bufring GPADL on the target CPU.
+	 */
+	mh = vmbus_msghc_get(sc, sizeof(*req));
+	if (mh == NULL) {
+		device_printf(sc->vmbus_dev,
+		    "can not get msg hypercall for chopen(chan%u)\n",
+		    chan->ch_id);
+		error = ENXIO;
+		goto failed;
+	}
+
+	req = vmbus_msghc_dataptr(mh);
+	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHOPEN;
+	req->chm_chanid = chan->ch_id;
+	req->chm_openid = chan->ch_id;
+	req->chm_gpadl = chan->ch_bufring_gpadl;
+	req->chm_vcpuid = chan->ch_vcpuid;
+	req->chm_txbr_pgcnt = txbr_size >> PAGE_SHIFT;
+	if (udlen > 0)
+		memcpy(req->chm_udata, udata, udlen);
+
+	error = vmbus_msghc_exec(sc, mh);
+	if (error) {
+		device_printf(sc->vmbus_dev,
+		    "chopen(chan%u) msg hypercall exec failed: %d\n",
+		    chan->ch_id, error);
+		vmbus_msghc_put(sc, mh);
+		goto failed;
+	}
+
+	msg = vmbus_msghc_wait_result(sc, mh);
+	resp = (const struct vmbus_chanmsg_chopen_resp *)msg->msg_data;
+	status = resp->chm_status;
+
+	vmbus_msghc_put(sc, mh);
+
+	if (status == 0) {
+		if (bootverbose) {
+			device_printf(sc->vmbus_dev, "chan%u opened\n",
+			    chan->ch_id);
+		}
+		return 0;
+	}
+
+	device_printf(sc->vmbus_dev, "failed to open chan%u\n", chan->ch_id);
+	error = ENXIO;
+
+failed:
+	if (chan->ch_bufring_gpadl) {
+		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
+		chan->ch_bufring_gpadl = 0;
+	}
+	if (chan->ch_bufring != NULL) {
+		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
+		chan->ch_bufring = NULL;
+	}
+	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
+	return error;
+}
+
+int
+vmbus_chan_gpadl_connect(struct hv_vmbus_channel *chan, bus_addr_t paddr,
+    int size, uint32_t *gpadl0)
+{
+	struct vmbus_softc *sc = chan->vmbus_sc;
+	struct vmbus_msghc *mh;
+	struct vmbus_chanmsg_gpadl_conn *req;
+	const struct vmbus_message *msg;
+	size_t reqsz;
+	uint32_t gpadl, status;
+	int page_count, range_len, i, cnt, error;
+	uint64_t page_id;
+
+	/*
+	 * Preliminary checks.
+	 */
+
+	KASSERT((size & PAGE_MASK) == 0,
+	    ("invalid GPA size %d, not a multiple of page size", size));
+	page_count = size >> PAGE_SHIFT;
+
+	KASSERT((paddr & PAGE_MASK) == 0,
+	    ("GPA is not page aligned %jx", (uintmax_t)paddr));
+	page_id = paddr >> PAGE_SHIFT;
+
+	range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
+	/*
+	 * We don't support multiple GPA ranges.
+	 */
+	if (range_len > UINT16_MAX) {
+		device_printf(sc->vmbus_dev, "GPA too large, %d pages\n",
+		    page_count);
+		return EOPNOTSUPP;
+	}
+
+	/*
+	 * Allocate GPADL id.
+	 */
+	gpadl = vmbus_gpadl_alloc(sc);
+	*gpadl0 = gpadl;
+
+	/*
+	 * Connect this GPADL to the target channel.
+	 *
+	 * NOTE:
+	 * Since each message can only hold a small set of page
+	 * addresses, several messages may be required to
+	 * complete the connection.
+	 */
+	if (page_count > VMBUS_CHANMSG_GPADL_CONN_PGMAX)
+		cnt = VMBUS_CHANMSG_GPADL_CONN_PGMAX;
+	else
+		cnt = page_count;
+	page_count -= cnt;
+
+	reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
+	    chm_range.gpa_page[cnt]);
+	mh = vmbus_msghc_get(sc, reqsz);
+	if (mh == NULL) {
+		device_printf(sc->vmbus_dev,
+		    "can not get msg hypercall for gpadl->chan%u\n",
+		    chan->ch_id);
+		return EIO;
+	}
+
+	req = vmbus_msghc_dataptr(mh);
+	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_CONN;
+	req->chm_chanid = chan->ch_id;
+	req->chm_gpadl = gpadl;
+	req->chm_range_len = range_len;
+	req->chm_range_cnt = 1;
+	req->chm_range.gpa_len = size;
+	req->chm_range.gpa_ofs = 0;
+	for (i = 0; i < cnt; ++i)
+		req->chm_range.gpa_page[i] = page_id++;
+
+	error = vmbus_msghc_exec(sc, mh);
+	if (error) {
+		device_printf(sc->vmbus_dev,
+		    "gpadl->chan%u msg hypercall exec failed: %d\n",
+		    chan->ch_id, error);
+		vmbus_msghc_put(sc, mh);
+		return error;
+	}
+
+	while (page_count > 0) {
+		struct vmbus_chanmsg_gpadl_subconn *subreq;
+
+		if (page_count > VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX)
+			cnt = VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX;
+		else
+			cnt = page_count;
+		page_count -= cnt;
+
+		reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
+		    chm_gpa_page[cnt]);
+		vmbus_msghc_reset(mh, reqsz);
+
+		subreq = vmbus_msghc_dataptr(mh);
+		subreq->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_SUBCONN;
+		subreq->chm_gpadl = gpadl;
+		for (i = 0; i < cnt; ++i)
+			subreq->chm_gpa_page[i] = page_id++;
+
+		vmbus_msghc_exec_noresult(mh);
+	}
+	KASSERT(page_count == 0, ("invalid page count %d", page_count));
+
+	msg = vmbus_msghc_wait_result(sc, mh);
+	status = ((const struct vmbus_chanmsg_gpadl_connresp *)
+	    msg->msg_data)->chm_status;
+
+	vmbus_msghc_put(sc, mh);
+
+	if (status != 0) {
+		device_printf(sc->vmbus_dev, "gpadl->chan%u failed: "
+		    "status %u\n", chan->ch_id, status);
+		return EIO;
+	} else {
+		if (bootverbose) {
+			device_printf(sc->vmbus_dev, "gpadl->chan%u "
+			    "succeeded\n", chan->ch_id);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Disconnect the GPA from the target channel
+ */
+int
+vmbus_chan_gpadl_disconnect(struct hv_vmbus_channel *chan, uint32_t gpadl)
+{
+	struct vmbus_softc *sc = chan->vmbus_sc;
+	struct vmbus_msghc *mh;
+	struct vmbus_chanmsg_gpadl_disconn *req;
+	int error;
+
+	mh = vmbus_msghc_get(sc, sizeof(*req));
+	if (mh == NULL) {
+		device_printf(sc->vmbus_dev,
+		    "can not get msg hypercall for gpa x->chan%u\n",
+		    chan->ch_id);
+		return EBUSY;
+	}
+
+	req = vmbus_msghc_dataptr(mh);
+	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_GPADL_DISCONN;
+	req->chm_chanid = chan->ch_id;
+	req->chm_gpadl = gpadl;
+
+	error = vmbus_msghc_exec(sc, mh);
+	if (error) {
+		device_printf(sc->vmbus_dev,
+		    "gpa x->chan%u msg hypercall exec failed: %d\n",
+		    chan->ch_id, error);
+		vmbus_msghc_put(sc, mh);
+		return error;
+	}
+
+	vmbus_msghc_wait_result(sc, mh);
+	/* Discard result; no useful information */
+	vmbus_msghc_put(sc, mh);
+
+	return 0;
+}
+
+static void
+vmbus_chan_close_internal(struct hv_vmbus_channel *chan)
+{
+	struct vmbus_softc *sc = chan->vmbus_sc;
+	struct vmbus_msghc *mh;
+	struct vmbus_chanmsg_chclose *req;
+	struct taskqueue *tq = chan->ch_tq;
+	int error;
+
+	/* TODO: stringent check */
+	atomic_clear_int(&chan->ch_stflags, VMBUS_CHAN_ST_OPENED);
+
+	/*
+	 * Free this channel's sysctl tree attached to its device's
+	 * sysctl tree.
+	 */
+	sysctl_ctx_free(&chan->ch_sysctl_ctx);
+
+	/*
+	 * Set ch_tq to NULL to prevent more requests from being scheduled.
+	 * XXX pretty broken; need rework.
+	 */
+	chan->ch_tq = NULL;
+	taskqueue_drain(tq, &chan->ch_task);
+	chan->ch_cb = NULL;
+
+	/*
+	 * Close this channel.
+	 */
+	mh = vmbus_msghc_get(sc, sizeof(*req));
+	if (mh == NULL) {
+		device_printf(sc->vmbus_dev,
+		    "can not get msg hypercall for chclose(chan%u)\n",
+		    chan->ch_id);
+		return;
+	}
+
+	req = vmbus_msghc_dataptr(mh);
+	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHCLOSE;
+	req->chm_chanid = chan->ch_id;
+
+	error = vmbus_msghc_exec_noresult(mh);
+	vmbus_msghc_put(sc, mh);
+
+	if (error) {
+		device_printf(sc->vmbus_dev,
+		    "chclose(chan%u) msg hypercall exec failed: %d\n",
+		    chan->ch_id, error);
+		return;
+	} else if (bootverbose) {
+		device_printf(sc->vmbus_dev, "close chan%u\n", chan->ch_id);
+	}
+
+	/*
+	 * Disconnect the TX+RX bufrings from this channel.
+	 */
+	if (chan->ch_bufring_gpadl) {
+		vmbus_chan_gpadl_disconnect(chan, chan->ch_bufring_gpadl);
+		chan->ch_bufring_gpadl = 0;
+	}
+
+	/*
+	 * Destroy the TX+RX bufrings.
+	 */
+	hv_ring_buffer_cleanup(&chan->outbound);
+	hv_ring_buffer_cleanup(&chan->inbound);
+	if (chan->ch_bufring != NULL) {
+		hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
+		chan->ch_bufring = NULL;
+	}
+}
+
+/*
+ * Caller should make sure that all sub-channels have
+ * been added to 'chan' and all to-be-closed channels
+ * are not being opened.
+ */
+void
+vmbus_chan_close(struct hv_vmbus_channel *chan)
+{
+	int subchan_cnt;
+
+	if (!VMBUS_CHAN_ISPRIMARY(chan)) {
+		/*
+		 * Sub-channel is closed when its primary channel
+		 * is closed; done.
+		 */
+		return;
+	}
+
+	/*
+	 * Close all sub-channels, if any.
+	 */
+	subchan_cnt = chan->ch_subchan_cnt;
+	if (subchan_cnt > 0) {
+		struct hv_vmbus_channel **subchan;
+		int i;
+
+		subchan = vmbus_subchan_get(chan, subchan_cnt);
+		for (i = 0; i < subchan_cnt; ++i)
+			vmbus_chan_close_internal(subchan[i]);
+		vmbus_subchan_rel(subchan, subchan_cnt);
+	}
+
+	/* Then close the primary channel. */
+	vmbus_chan_close_internal(chan);
+}
+
+int
+vmbus_chan_send(struct hv_vmbus_channel *chan, uint16_t type, uint16_t flags,
+    void *data, int dlen, uint64_t xactid)
+{
+	struct vmbus_chanpkt pkt;
+	int pktlen, pad_pktlen, hlen, error;
+	uint64_t pad = 0;
+	struct iovec iov[3];
+	boolean_t send_evt;
+
+	hlen = sizeof(pkt);
+	pktlen = hlen + dlen;
+	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+
+	pkt.cp_hdr.cph_type = type;
+	pkt.cp_hdr.cph_flags = flags;
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
+	pkt.cp_hdr.cph_xactid = xactid;
+
+	iov[0].iov_base = &pkt;
+	iov[0].iov_len = hlen;
+	iov[1].iov_base = data;
+	iov[1].iov_len = dlen;
+	iov[2].iov_base = &pad;
+	iov[2].iov_len = pad_pktlen - pktlen;
+
+	error = hv_ring_buffer_write(&chan->outbound, iov, 3, &send_evt);
+	if (!error && send_evt)
+		vmbus_chan_signal_tx(chan);
+	return error;
+}
+
+int
+vmbus_chan_send_sglist(struct hv_vmbus_channel *chan,
+    struct vmbus_gpa sg[], int sglen, void *data, int dlen, uint64_t xactid)
+{
+	struct vmbus_chanpkt_sglist pkt;
+	int pktlen, pad_pktlen, hlen, error;
+	struct iovec iov[4];
+	boolean_t send_evt;
+	uint64_t pad = 0;
+
+	KASSERT(sglen < VMBUS_CHAN_SGLIST_MAX,
+	    ("invalid sglist len %d", sglen));
+
+	hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
+	pktlen = hlen + dlen;
+	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+
+	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
+	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
+	pkt.cp_hdr.cph_xactid = xactid;
+	pkt.cp_rsvd = 0;
+	pkt.cp_gpa_cnt = sglen;
+
+	iov[0].iov_base = &pkt;
+	iov[0].iov_len = sizeof(pkt);
+	iov[1].iov_base = sg;
+	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
+	iov[2].iov_base = data;
+	iov[2].iov_len = dlen;
+	iov[3].iov_base = &pad;
+	iov[3].iov_len = pad_pktlen - pktlen;
+
+	error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
+	if (!error && send_evt)
+		vmbus_chan_signal_tx(chan);
+	return error;
+}
+
+int
+vmbus_chan_send_prplist(struct hv_vmbus_channel *chan,
+    struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
+    uint64_t xactid)
+{
+	struct vmbus_chanpkt_prplist pkt;
+	int pktlen, pad_pktlen, hlen, error;
+	struct iovec iov[4];
+	boolean_t send_evt;
+	uint64_t pad = 0;
+
+	KASSERT(prp_cnt < VMBUS_CHAN_PRPLIST_MAX,
+	    ("invalid prplist entry count %d", prp_cnt));
+
+	hlen = __offsetof(struct vmbus_chanpkt_prplist,
+	    cp_range[0].gpa_page[prp_cnt]);
+	pktlen = hlen + dlen;
+	pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+
+	pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
+	pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_hlen, hlen);
+	VMBUS_CHANPKT_SETLEN(pkt.cp_hdr.cph_tlen, pad_pktlen);
+	pkt.cp_hdr.cph_xactid = xactid;
+	pkt.cp_rsvd = 0;
+	pkt.cp_range_cnt = 1;
+
+	iov[0].iov_base = &pkt;
+	iov[0].iov_len = sizeof(pkt);
+	iov[1].iov_base = prp;
+	iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
+	iov[2].iov_base = data;
+	iov[2].iov_len = dlen;
+	iov[3].iov_base = &pad;
+	iov[3].iov_len = pad_pktlen - pktlen;
+
+	error = hv_ring_buffer_write(&chan->outbound, iov, 4, &send_evt);
+	if (!error && send_evt)
+		vmbus_chan_signal_tx(chan);
+	return error;
+}
+
+int
+vmbus_chan_recv(struct hv_vmbus_channel *chan, void *data, int *dlen0,
+    uint64_t *xactid)
+{
+	struct vmbus_chanpkt_hdr pkt;
+	int error, dlen, hlen;
+
+	error = hv_ring_buffer_peek(&chan->inbound, &pkt, sizeof(pkt));
+	if (error)
+		return error;
+
+	hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
+	dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
+
+	if (*dlen0 < dlen) {
+		/* Return the size of this packet's data. */
+		*dlen0 = dlen;
+		return ENOBUFS;
+	}
+
+	*xactid = pkt.cph_xactid;
+	*dlen0 = dlen;
+
+	/* Skip packet header */
+	error = hv_ring_buffer_read(&chan->inbound, data, dlen, hlen);
+	KASSERT(!error, ("hv_ring_buffer_read failed"));
+
+	return 0;
+}
+
+int
+vmbus_chan_recv_pkt(struct hv_vmbus_channel *chan,
+    struct vmbus_chanpkt_hdr *pkt0, int *pktlen0)
+{
+	struct vmbus_chanpkt_hdr pkt;
+	int error, pktlen;
+
+	error = hv_ring_buffer_peek(&chan->inbound, &pkt, sizeof(pkt));
+	if (error)
+		return error;
+
+	pktlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen);
+	if (*pktlen0 < pktlen) {
+		/* Return the size of this packet. */
+		*pktlen0 = pktlen;
+		return ENOBUFS;
+	}
+	*pktlen0 = pktlen;
+
+	/* Include packet header */
+	error = hv_ring_buffer_read(&chan->inbound, pkt0, pktlen, 0);
+	KASSERT(!error, ("hv_ring_buffer_read failed"));
+
+	return 0;
+}
+
+static void
+vmbus_chan_task(void *xchan, int pending __unused)
+{
+	struct hv_vmbus_channel *chan = xchan;
+	vmbus_chan_callback_t cb = chan->ch_cb;
+	void *cbarg = chan->ch_cbarg;
+
+	/*
+	 * Optimize host-to-guest signaling by ensuring that:
+	 * 1. While reading the channel, interrupts from the host are
+	 *    disabled.
+	 * 2. All posted messages from the host are processed before
+	 *    returning from this callback.
+	 * 3. Once we return, signaling from the host is re-enabled.
+	 *    After that we check whether additional packets are
+	 *    available to read; if so, the process is repeated.
+	 *
+	 * NOTE: The interrupt has already been disabled in the ISR.
+	 */
+	for (;;) {
+		uint32_t left;
+
+		cb(cbarg);
+
+		left = hv_ring_buffer_read_end(&chan->inbound);
+		if (left == 0) {
+			/* No more data in RX bufring; done */
+			break;
+		}
+		hv_ring_buffer_read_begin(&chan->inbound);
+	}
+}
+
+static void
+vmbus_chan_task_nobatch(void *xchan, int pending __unused)
+{
+	struct hv_vmbus_channel *chan = xchan;
+
+	chan->ch_cb(chan->ch_cbarg);
+}
+
+static __inline void
+vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
+    int flag_cnt)
+{
+	int f;
+
+	for (f = 0; f < flag_cnt; ++f) {
+		uint32_t chid_base;
+		u_long flags;
+		int chid_ofs;
+
+		if (event_flags[f] == 0)
+			continue;
+
+		flags = atomic_swap_long(&event_flags[f], 0);
+		chid_base = f << VMBUS_EVTFLAG_SHIFT;
+
+		while ((chid_ofs = ffsl(flags)) != 0) {
+			struct hv_vmbus_channel *chan;
+
+			--chid_ofs; /* NOTE: ffsl is 1-based */
+			flags &= ~(1UL << chid_ofs);
+
+			chan = sc->vmbus_chmap[chid_base + chid_ofs];
+
+			/* if channel is closed or closing */
+			if (chan == NULL || chan->ch_tq == NULL)
+				continue;
+
+			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
+				hv_ring_buffer_read_begin(&chan->inbound);
+			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
+		}
+	}
+}
+
+void
+vmbus_event_proc(struct vmbus_softc *sc, int cpu)
+{
+	struct vmbus_evtflags *eventf;
+
+	/*
+	 * On hosts running Windows 8 or later, the event page can be checked
+	 * directly to get the id of the channel that has a pending interrupt.
+	 */
+	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
+	vmbus_event_flags_proc(sc, eventf->evt_flags,
+	    VMBUS_PCPU_GET(sc, event_flags_cnt, cpu));
+}
+
+void
+vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
+{
+	struct vmbus_evtflags *eventf;
+
+	eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
+	if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
+		vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
+		    VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
+	}
+}
+
+static void
+vmbus_chan_update_evtflagcnt(struct vmbus_softc *sc,
+    const struct hv_vmbus_channel *chan)
+{
+	volatile int *flag_cnt_ptr;
+	int flag_cnt;
+
+	flag_cnt = (chan->ch_id / VMBUS_EVTFLAG_LEN) + 1;
+	flag_cnt_ptr = VMBUS_PCPU_PTR(sc, event_flags_cnt, chan->ch_cpuid);
+
+	for (;;) {
+		int old_flag_cnt;
+
+		old_flag_cnt = *flag_cnt_ptr;
+		if (old_flag_cnt >= flag_cnt)
+			break;
+		if (atomic_cmpset_int(flag_cnt_ptr, old_flag_cnt, flag_cnt)) {
+			if (bootverbose) {
+				device_printf(sc->vmbus_dev,
+				    "channel%u update cpu%d flag_cnt to %d\n",
+				    chan->ch_id, chan->ch_cpuid, flag_cnt);
+			}
+			break;
+		}
+	}
+}
+
+static struct hv_vmbus_channel *
+vmbus_chan_alloc(struct vmbus_softc *sc)
+{
+	struct hv_vmbus_channel *chan;
+
+	chan = malloc(sizeof(*chan), M_DEVBUF, M_WAITOK | M_ZERO);
+
+	chan->ch_monprm = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
+	    HYPERCALL_PARAM_ALIGN, 0, sizeof(struct hyperv_mon_param),
+	    &chan->ch_monprm_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
+	if (chan->ch_monprm == NULL) {
+		device_printf(sc->vmbus_dev, "monprm alloc failed\n");
+		free(chan, M_DEVBUF);
+		return NULL;
+	}
+
+	chan->vmbus_sc = sc;
+	mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
+	TAILQ_INIT(&chan->ch_subchans);
+	TASK_INIT(&chan->ch_detach_task, 0, vmbus_chan_detach_task, chan);
+
+	return chan;
+}
+
+static void
+vmbus_chan_free(struct hv_vmbus_channel *chan)
+{
+	/* TODO: assert sub-channel list is empty */
+	/* TODO: assert no longer on the primary channel's sub-channel list */
+	/* TODO: assert no longer on the vmbus channel list */
+	hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
+	mtx_destroy(&chan->ch_subchan_lock);
+	free(chan, M_DEVBUF);
+}
+
+static int
+vmbus_chan_add(struct hv_vmbus_channel *newchan)
+{
+	struct vmbus_softc *sc = newchan->vmbus_sc;
+	struct hv_vmbus_channel *prichan;
+
+	if (newchan->ch_id == 0) {
+		/*
+		 * XXX
+		 * Chan0 is never processed and should not be offered;
+		 * skip it.
+		 */
+		device_printf(sc->vmbus_dev, "got chan0 offer, discard\n");
+		return EINVAL;
+	} else if (newchan->ch_id >= VMBUS_CHAN_MAX) {
+		device_printf(sc->vmbus_dev, "invalid chan%u offer\n",
+		    newchan->ch_id);
+		return EINVAL;
+	}
+	sc->vmbus_chmap[newchan->ch_id] = newchan;
+

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
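
For context, here is a minimal usage sketch, not part of r303023, of how a
hypothetical driver might consume the renamed vmbus_chan_* API from
vmbus_chan.c above.  The "mydrv" names, bufring sizes, and the included
header are assumptions; the vmbus_chan_open(), vmbus_chan_recv(), and
vmbus_chan_close() calls follow the signatures in the diff, and the callback
shape is inferred from how ch_cb is invoked in vmbus_chan_task_nobatch().

#include <sys/param.h>
#include <dev/hyperv/vmbus/hv_vmbus_priv.h>	/* assumed: declares the vmbus_chan_* API */

#define MYDRV_TXBR_SIZE		(16 * PAGE_SIZE)	/* bufring sizes must be page multiples */
#define MYDRV_RXBR_SIZE		(16 * PAGE_SIZE)

/* RX callback; runs on the channel's per-CPU event taskqueue. */
static void
mydrv_chan_cb(void *xchan)
{
	struct hv_vmbus_channel *chan = xchan;
	uint8_t buf[256];
	uint64_t xactid;
	int dlen;

	for (;;) {
		dlen = sizeof(buf);
		/* Non-zero return: no more packets, or ENOBUFS if buf is too small. */
		if (vmbus_chan_recv(chan, buf, &dlen, &xactid) != 0)
			break;
		/* ... consume dlen bytes of inband data ... */
	}
}

static int
mydrv_chan_attach(struct hv_vmbus_channel *chan)
{
	/* No user data (udata/udlen); pass the channel itself as the callback argument. */
	return (vmbus_chan_open(chan, MYDRV_TXBR_SIZE, MYDRV_RXBR_SIZE,
	    NULL, 0, mydrv_chan_cb, chan));
}

static void
mydrv_chan_detach(struct hv_vmbus_channel *chan)
{
	/* Closing a primary channel also closes its sub-channels. */
	vmbus_chan_close(chan);
}

The close-on-primary behaviour in the detach sketch mirrors vmbus_chan_close()
above, which closes all sub-channels before closing the primary channel.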


