Date:      Thu, 22 Jul 2010 15:38:36 +0000 (UTC)
From:      "Kenneth D. Merry" <ken@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r210376 - stable/8/sys/dev/mpt
Message-ID:  <201007221538.o6MFcaqe087559@svn.freebsd.org>

Author: ken
Date: Thu Jul 22 15:38:36 2010
New Revision: 210376
URL: http://svn.freebsd.org/changeset/base/210376

Log:
  MFC 209599, 209960:
  
  r209960 | marius
  
    - Make the maxsize parameter of the data buffer DMA tag match maxio, which
      was missed in r209599.
      Reported and tested by: Michael Moll
    - Declare mpt_dma_buf_alloc() static just like mpt_dma_buf_free(); both are
      used in mpt.c only.
  
    Reviewed by:    ken
    MFC after:      r209599
  
  r209599 | ken
  
    Change the mpt driver to allow larger I/O sizes.
  
    The mpt driver previously didn't report a 'maxio' size to CAM, and so the
    da(4) driver limited I/O sizes to DFLTPHYS (64K) by default.  The number
    of scatter gather segments allowed, as reported to busdma, was
    (128K / PAGE_SIZE) + 1, or 33 on architectures with 4K pages.
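
    For reference, a minimal sketch of the old arithmetic (assuming 4K pages;
    illustrative values only, not driver code):

        #include <sys/param.h>                  /* PAGE_SIZE, DFLTPHYS */

        #define MPT_MAXPHYS     (128 * 1024)    /* legacy constant, removed below */

        /* Old busdma segment limit: (128K / 4K) + 1 = 33 segments. */
        int old_nsegs = (MPT_MAXPHYS / PAGE_SIZE) + 1;

        /* With cpi->maxio left unreported, da(4) fell back to DFLTPHYS (64K). */
        int old_maxio = DFLTPHYS;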
  
    Change things around so that we wait until we've determined how many
    segments the adapter can support before creating the busdma tag used for
    buffers, so we can potentially support more S/G segments and therefore
    larger I/O sizes.
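
    In rough outline, the new sizing works out as follows (a sketch that mirrors
    the hunks below; the helper name is hypothetical, not the driver's code):

        #include <sys/param.h>          /* MAXPHYS, PAGE_SIZE, kernel min() */

        static uint32_t
        mpt_calc_maxio(uint32_t max_seg_cnt)
        {
                uint32_t max_cam_seg_cnt;

                /* Cap the CAM-visible segment count at what MAXPHYS needs. */
                max_cam_seg_cnt = min(max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);

                /*
                 * An unaligned buffer of N pages can span N + 1 segments, so
                 * with S segments the safe maximum transfer is (S - 1) pages.
                 * The same figure sizes the data buffer DMA tag (maxsize) and
                 * is reported to CAM as cpi->maxio.
                 */
                return ((max_cam_seg_cnt - 1) * PAGE_SIZE);
        }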
  
    Also, fix some things that were broken about the module unload path.  It
    still gets hung up inside CAM, though.
  
    mpt.c:	Move some busdma initialization calls in here, and call
  		them just after we've gotten the IOCFacts, and know how
  		many S/G segments this adapter can support.
  
    mpt.h:	Get rid of MPT_MAXPHYS, it is no longer used.
  
  		Add max_cam_seg_cnt, which is used to report our maximum
  		I/O size up to CAM.
  
    mpt_cam.c:	Use max_cam_seg_cnt to report our maximum I/O size to CAM.
  
  		Fix the locking in mpt_cam_detach().
  
    mpt_pci.c:	Pull some busdma initialization and teardown out and put
  		it in mpt.c.  We now delay it until we know how many scatter
  		gather segments the adapter can support, and therefore
  		how to setup our busdma tags.
  
    mpt_raid.c:	Make sure we wake up the right wait channel to get the
  		raid thread to wake up when we're trying to shut it down.
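
    The underlying sleep(9) rule, as a generic sketch (lock name and flow are
    illustrative, not the driver's exact code): the sleep channel is compared by
    pointer value, so the waker must pass the same address the thread sleeps on.

        /* Raid thread side: sleeps on the address of raid_volumes. */
        mtx_lock(&mpt->lock);
        while (mpt->shutdwn_raid == 0)
                msleep(&mpt->raid_volumes, &mpt->lock, 0, "mptraid", 0);
        mtx_unlock(&mpt->lock);

        /* Shutdown side: wakeup() must use the identical channel; the old
         * wakeup(mpt->raid_volumes) notified a different (wrong) address. */
        mpt->shutdwn_raid = 1;
        wakeup(&mpt->raid_volumes);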
  
    Reviewed by:	gibbs, mjacob
    MFC after:	2 weeks

Modified:
  stable/8/sys/dev/mpt/mpt.c
  stable/8/sys/dev/mpt/mpt.h
  stable/8/sys/dev/mpt/mpt_cam.c
  stable/8/sys/dev/mpt/mpt_pci.c
  stable/8/sys/dev/mpt/mpt_raid.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/dev/mpt/mpt.c
==============================================================================
--- stable/8/sys/dev/mpt/mpt.c	Thu Jul 22 14:52:51 2010	(r210375)
+++ stable/8/sys/dev/mpt/mpt.c	Thu Jul 22 15:38:36 2010	(r210376)
@@ -128,6 +128,8 @@ static void mpt_send_event_ack(struct mp
 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
 static int mpt_soft_reset(struct mpt_softc *mpt);
 static void mpt_hard_reset(struct mpt_softc *mpt);
+static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
+static void mpt_dma_buf_free(struct mpt_softc *mpt);
 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
 
@@ -2246,14 +2248,6 @@ mpt_core_attach(struct mpt_softc *mpt)
 	TAILQ_INIT(&mpt->request_pending_list);
 	TAILQ_INIT(&mpt->request_free_list);
 	TAILQ_INIT(&mpt->request_timeout_list);
-	MPT_LOCK(mpt);
-	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
-		request_t *req = &mpt->request_pool[val];
-		req->state = REQ_STATE_ALLOCATED;
-		mpt_callout_init(mpt, &req->callout);
-		mpt_free_request(mpt, req);
-	}
-	MPT_UNLOCK(mpt);
 	for (val = 0; val < MPT_MAX_LUNS; val++) {
 		STAILQ_INIT(&mpt->trt[val].atios);
 		STAILQ_INIT(&mpt->trt[val].inots);
@@ -2346,6 +2340,8 @@ mpt_core_detach(struct mpt_softc *mpt)
 		request_t *req = &mpt->request_pool[val];
 		mpt_callout_drain(mpt, &req->callout);
 	}
+
+	mpt_dma_buf_free(mpt);
 }
 
 int
@@ -2480,6 +2476,105 @@ mpt_download_fw(struct mpt_softc *mpt)
 	return (0);
 }
 
+static int
+mpt_dma_buf_alloc(struct mpt_softc *mpt)
+{
+	struct mpt_map_info mi;
+	uint8_t *vptr;
+	uint32_t pptr, end;
+	int i, error;
+
+	/* Create a child tag for data buffers */
+	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
+	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
+	    NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
+	    mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
+	    &mpt->buffer_dmat) != 0) {
+		mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
+		return (1);
+	}
+
+	/* Create a child tag for request buffers */
+	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
+	    NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
+	    &mpt->request_dmat) != 0) {
+		mpt_prt(mpt, "cannot create a dma tag for requests\n");
+		return (1);
+	}
+
+	/* Allocate some DMA accessable memory for requests */
+	if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
+	    BUS_DMA_NOWAIT, &mpt->request_dmap) != 0) {
+		mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
+		    MPT_REQ_MEM_SIZE(mpt));
+		return (1);
+	}
+
+	mi.mpt = mpt;
+	mi.error = 0;
+
+	/* Load and lock it into "bus space" */
+	bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
+	    MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
+
+	if (mi.error) {
+		mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
+		    mi.error);
+		return (1);
+	}
+	mpt->request_phys = mi.phys;
+
+	/*
+	 * Now create per-request dma maps
+	 */
+	i = 0;
+	pptr =  mpt->request_phys;
+	vptr =  mpt->request;
+	end = pptr + MPT_REQ_MEM_SIZE(mpt);
+	while(pptr < end) {
+		request_t *req = &mpt->request_pool[i];
+		req->index = i++;
+
+		/* Store location of Request Data */
+		req->req_pbuf = pptr;
+		req->req_vbuf = vptr;
+
+		pptr += MPT_REQUEST_AREA;
+		vptr += MPT_REQUEST_AREA;
+
+		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
+		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
+
+		error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
+		if (error) {
+			mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
+			    error);
+			return (1);
+		}
+	}
+
+	return (0);
+}
+
+static void
+mpt_dma_buf_free(struct mpt_softc *mpt)
+{
+	int i;
+	if (mpt->request_dmat == 0) {
+		mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
+		return;
+	}
+	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
+		bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
+	}
+	bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
+	bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
+	bus_dma_tag_destroy(mpt->request_dmat);
+	mpt->request_dmat = 0;
+	bus_dma_tag_destroy(mpt->buffer_dmat);
+}
+
 /*
  * Allocate/Initialize data structures for the controller.  Called
  * once at instance startup.
@@ -2488,7 +2583,7 @@ static int
 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
 {
 	PTR_MSG_PORT_FACTS_REPLY pfp;
-	int error,  port;
+	int error, port, val;
 	size_t len;
 
 	if (tn == MPT_MAX_TRYS) {
@@ -2548,7 +2643,7 @@ mpt_configure_ioc(struct mpt_softc *mpt,
 
 	/* limited by the number of chain areas the card will support */
 	if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
-		mpt_lprt(mpt, MPT_PRT_DEBUG,
+		mpt_lprt(mpt, MPT_PRT_INFO,
 		    "chain depth limited to %u (from %u)\n",
 		    mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
 		mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
@@ -2557,17 +2652,37 @@ mpt_configure_ioc(struct mpt_softc *mpt,
 	/* converted to the number of simple sges in chain segments. */
 	mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
 
-	mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
-	    mpt->max_seg_cnt);
-	mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
+	/*
+	 * Use this as the basis for reporting the maximum I/O size to CAM.
+	 */
+	mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
+
+	error = mpt_dma_buf_alloc(mpt);
+	if (error != 0) {
+		mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
+		return (EIO);
+	}
+
+	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
+		request_t *req = &mpt->request_pool[val];
+		req->state = REQ_STATE_ALLOCATED;
+		mpt_callout_init(mpt, &req->callout);
+		mpt_free_request(mpt, req);
+	}
+
+	mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
+		 "CAM Segment Count: %u\n", mpt->max_seg_cnt,
+		 mpt->max_cam_seg_cnt);
+
+	mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
 	    mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
-	mpt_lprt(mpt, MPT_PRT_DEBUG,
+	mpt_lprt(mpt, MPT_PRT_INFO,
 	    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
 	    "Request Frame Size %u bytes Max Chain Depth %u\n",
 	    mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
 	    mpt->ioc_facts.RequestFrameSize << 2,
 	    mpt->ioc_facts.MaxChainDepth);
-	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
+	mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
 	    "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
 	    mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
 
@@ -2757,7 +2872,7 @@ mpt_enable_ioc(struct mpt_softc *mpt, in
 		mpt_send_event_request(mpt, 1);
 
 		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
-			mpt_prt(mpt, "failed to enable port 0\n");
+			mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
 			return (ENXIO);
 		}
 	}

Modified: stable/8/sys/dev/mpt/mpt.h
==============================================================================
--- stable/8/sys/dev/mpt/mpt.h	Thu Jul 22 14:52:51 2010	(r210375)
+++ stable/8/sys/dev/mpt/mpt.h	Thu Jul 22 15:38:36 2010	(r210376)
@@ -743,6 +743,7 @@ struct mpt_softc {
 	bus_addr_t		request_phys;	/* BusAddr of request memory */
 
 	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
+	uint32_t		max_cam_seg_cnt;/* calculated from MAXPHYS*/
 
 	/*
 	 * Hardware management
@@ -995,9 +996,6 @@ mpt_pio_read(struct mpt_softc *mpt, int 
 /* Max MPT Reply we are willing to accept (must be power of 2) */
 #define MPT_REPLY_SIZE   	256
 
-/* Max i/o size, based on legacy MAXPHYS.  Can be increased. */
-#define MPT_MAXPHYS		(128 * 1024)
-
 /*
  * Must be less than 16384 in order for target mode to work
  */

Modified: stable/8/sys/dev/mpt/mpt_cam.c
==============================================================================
--- stable/8/sys/dev/mpt/mpt_cam.c	Thu Jul 22 14:52:51 2010	(r210375)
+++ stable/8/sys/dev/mpt/mpt_cam.c	Thu Jul 22 15:38:36 2010	(r210376)
@@ -1205,25 +1205,21 @@ mpt_cam_detach(struct mpt_softc *mpt)
 		free(mpt->sas_portinfo, M_DEVBUF);
 		mpt->sas_portinfo = NULL;
 	}
-	MPT_UNLOCK(mpt);
 
 	if (mpt->sim != NULL) {
 		xpt_free_path(mpt->path);
-		MPT_LOCK(mpt);
 		xpt_bus_deregister(cam_sim_path(mpt->sim));
-		MPT_UNLOCK(mpt);
 		cam_sim_free(mpt->sim, TRUE);
 		mpt->sim = NULL;
 	}
 
 	if (mpt->phydisk_sim != NULL) {
 		xpt_free_path(mpt->phydisk_path);
-		MPT_LOCK(mpt);
 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
-		MPT_UNLOCK(mpt);
 		cam_sim_free(mpt->phydisk_sim, TRUE);
 		mpt->phydisk_sim = NULL;
 	}
+	MPT_UNLOCK(mpt);
 }
 
 /* This routine is used after a system crash to dump core onto the swap device.
@@ -3586,6 +3582,7 @@ mpt_action(struct cam_sim *sim, union cc
 		cpi->target_sprt = 0;
 		cpi->hba_eng_cnt = 0;
 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
+		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
 		/*
 		 * FC cards report MAX_DEVICES of 512, but
 		 * the MSG_SCSI_IO_REQUEST target id field
@@ -4226,7 +4223,7 @@ mpt_fc_post_els(struct mpt_softc *mpt, r
 	/*
 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
 	 * consist of a TE SGL element (with details length of zero)
-	 * followe by a SIMPLE SGL element which holds the address
+	 * followed by a SIMPLE SGL element which holds the address
 	 * of the buffer.
 	 */
 

Modified: stable/8/sys/dev/mpt/mpt_pci.c
==============================================================================
--- stable/8/sys/dev/mpt/mpt_pci.c	Thu Jul 22 14:52:51 2010	(r210375)
+++ stable/8/sys/dev/mpt/mpt_pci.c	Thu Jul 22 15:38:36 2010	(r210376)
@@ -720,9 +720,6 @@ mpt_pci_shutdown(device_t dev)
 static int
 mpt_dma_mem_alloc(struct mpt_softc *mpt)
 {
-	int i, error, nsegs;
-	uint8_t *vptr;
-	uint32_t pptr, end;
 	size_t len;
 	struct mpt_map_info mi;
 
@@ -795,82 +792,6 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
 	}
 	mpt->reply_phys = mi.phys;
 
-	/* Create a child tag for data buffers */
-
-	/*
-	 * XXX: we should say that nsegs is 'unrestricted, but that
-	 * XXX: tickles a horrible bug in the busdma code. Instead,
-	 * XXX: we'll derive a reasonable segment limit from MPT_MAXPHYS
-	 */
-	nsegs = (MPT_MAXPHYS / PAGE_SIZE) + 1;
-	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
-	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
-	    NULL, NULL, MAXBSIZE, nsegs, BUS_SPACE_MAXSIZE_32BIT, 0,
-	    &mpt->buffer_dmat) != 0) {
-		mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
-		return (1);
-	}
-
-	/* Create a child tag for request buffers */
-	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
-	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
-	    NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
-	    &mpt->request_dmat) != 0) {
-		mpt_prt(mpt, "cannot create a dma tag for requests\n");
-		return (1);
-	}
-
-	/* Allocate some DMA accessable memory for requests */
-	if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
-	    BUS_DMA_NOWAIT, &mpt->request_dmap) != 0) {
-		mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
-		    MPT_REQ_MEM_SIZE(mpt));
-		return (1);
-	}
-
-	mi.mpt = mpt;
-	mi.error = 0;
-
-	/* Load and lock it into "bus space" */
-        bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
-	    MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
-
-	if (mi.error) {
-		mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
-		    mi.error);
-		return (1);
-	}
-	mpt->request_phys = mi.phys;
-
-	/*
-	 * Now create per-request dma maps
-	 */
-	i = 0;
-	pptr =  mpt->request_phys;
-	vptr =  mpt->request;
-	end = pptr + MPT_REQ_MEM_SIZE(mpt);
-	while(pptr < end) {
-		request_t *req = &mpt->request_pool[i];
-		req->index = i++;
-
-		/* Store location of Request Data */
-		req->req_pbuf = pptr;
-		req->req_vbuf = vptr;
-
-		pptr += MPT_REQUEST_AREA;
-		vptr += MPT_REQUEST_AREA;
-
-		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
-		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
-
-		error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
-		if (error) {
-			mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
-			    error);
-			return (1);
-		}
-	}
-
 	return (0);
 }
 
@@ -881,7 +802,6 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
 static void
 mpt_dma_mem_free(struct mpt_softc *mpt)
 {
-	int i;
 
         /* Make sure we aren't double destroying */
         if (mpt->reply_dmat == 0) {
@@ -889,13 +809,6 @@ mpt_dma_mem_free(struct mpt_softc *mpt)
 		return;
         }
                 
-	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
-		bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
-	}
-	bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
-	bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
-	bus_dma_tag_destroy(mpt->request_dmat);
-	bus_dma_tag_destroy(mpt->buffer_dmat);
 	bus_dmamap_unload(mpt->reply_dmat, mpt->reply_dmap);
 	bus_dmamem_free(mpt->reply_dmat, mpt->reply, mpt->reply_dmap);
 	bus_dma_tag_destroy(mpt->reply_dmat);

Modified: stable/8/sys/dev/mpt/mpt_raid.c
==============================================================================
--- stable/8/sys/dev/mpt/mpt_raid.c	Thu Jul 22 14:52:51 2010	(r210375)
+++ stable/8/sys/dev/mpt/mpt_raid.c	Thu Jul 22 15:38:36 2010	(r210376)
@@ -646,7 +646,7 @@ mpt_terminate_raid_thread(struct mpt_sof
 		return;
 	}
 	mpt->shutdwn_raid = 1;
-	wakeup(mpt->raid_volumes);
+	wakeup(&mpt->raid_volumes);
 	/*
 	 * Sleep on a slightly different location
 	 * for this interlock just for added safety.


