Date:      Mon, 22 Feb 2010 17:03:45 +0000 (UTC)
From:      "Justin T. Gibbs" <gibbs@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r204214 - head/sys/amd64/amd64
Message-ID:  <201002221703.o1MH3jdH004862@svn.freebsd.org>

Author: gibbs
Date: Mon Feb 22 17:03:45 2010
New Revision: 204214
URL: http://svn.freebsd.org/changeset/base/204214

Log:
  Enforce stronger semantics for bus-dma alignment (currently only on amd64).
  Now all contiguous regions returned from bus-dma will be aligned to the
  alignment constraint and all but the last region are guaranteed to be
  a multiple of the alignment in length.  This also means that two adjacent
  bytes in the I/O stream differ in relative alignment by exactly 1, even
  if they are not physically contiguous.
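  
  For illustration only (this sketch is not part of the change; it assumes
  the usual busdma types, dmat->alignment, and the roundup2()/MIN macros),
  the new contract amounts to: take the bytes remaining up to the next page
  boundary, round that up to the tag's alignment, and clamp to what is left
  of the buffer, so only the final segment may fall short of a multiple of
  the alignment:
  
  	bus_size_t sg_len;
  
  	/* Bytes from vaddr up to the next page boundary. */
  	sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
  	/* Pad the segment out to a multiple of the alignment constraint. */
  	sg_len = roundup2(sg_len, dmat->alignment);
  	/* Only the last segment of the buffer may be shorter than that. */
  	sg_len = MIN(sg_len, buflen);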
  
  The old code, when needing to perform a copy in order to align data, only
  copied the amount of data needed to reach the next page boundary.  This
  often left an unaligned end to the segment.  Drivers such as Xen's blkfront
  can't deal with such segments.
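  
  For comparison (an illustrative paraphrase of the code removed below, not
  new code), the old loop simply advanced to the next page boundary and
  ignored the alignment constraint, which is how a bounced segment could
  end on an unaligned byte:
  
  	/* Old behaviour: advance to the next page boundary only. */
  	vaddr += PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);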
  
  The downside to this approach is that, once an unaligned region is encountered,
  the remainder of the I/O will be bounced.  However, bouncing should be rare.
  It is typically caused by non-performance-critical userland programs that
  don't bother to align their I/O buffers (e.g. bsdlabel).  In-kernel I/O
  buffers are always aligned to at least a page boundary.
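  
  For context, a hedged sketch of where the alignment constraint comes from:
  a driver declares it when creating its DMA tag, for example a block driver
  that needs sector-aligned segments (the values and the device_t "dev"
  below are illustrative, not taken from any particular driver):
  
  	bus_dma_tag_t tag;
  	int error;
  
  	error = bus_dma_tag_create(
  	    bus_get_dma_tag(dev),	/* inherit restrictions from the bus */
  	    512,			/* alignment: 512-byte sectors */
  	    0,				/* no boundary constraint */
  	    BUS_SPACE_MAXADDR,		/* lowaddr */
  	    BUS_SPACE_MAXADDR,		/* highaddr */
  	    NULL, NULL,			/* no filter function */
  	    MAXPHYS,			/* maxsize */
  	    MAXPHYS / PAGE_SIZE + 1,	/* nsegments */
  	    PAGE_SIZE,			/* maxsegsz */
  	    0,				/* flags */
  	    NULL, NULL,			/* default locking */
  	    &tag);
  	if (error != 0)
  		return (error);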
  
  Reviewed by:	scottl
  MFC after:      2 weeks

Modified:
  head/sys/amd64/amd64/busdma_machdep.c

Modified: head/sys/amd64/amd64/busdma_machdep.c
==============================================================================
--- head/sys/amd64/amd64/busdma_machdep.c	Mon Feb 22 17:03:26 2010	(r204213)
+++ head/sys/amd64/amd64/busdma_machdep.c	Mon Feb 22 17:03:45 2010	(r204214)
@@ -239,8 +239,7 @@ bus_dma_tag_create(bus_dma_tag_t parent,
 	newtag->alignment = alignment;
 	newtag->boundary = boundary;
 	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
-	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
-	    (PAGE_SIZE - 1);
+	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
 	newtag->filter = filter;
 	newtag->filterarg = filterarg;
 	newtag->maxsize = maxsize;
@@ -605,13 +604,18 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		vendaddr = (vm_offset_t)buf + buflen;
 
 		while (vaddr < vendaddr) {
+			bus_size_t sg_len;
+
+			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
 			if (pmap)
 				paddr = pmap_extract(pmap, vaddr);
 			else
 				paddr = pmap_kextract(vaddr);
-			if (run_filter(dmat, paddr) != 0)
+			if (run_filter(dmat, paddr) != 0) {
+				sg_len = roundup2(sg_len, dmat->alignment);
 				map->pagesneeded++;
-			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
+			}
+			vaddr += sg_len;
 		}
 		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
 	}
@@ -644,6 +648,8 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 	bmask = ~(dmat->boundary - 1);
 
 	for (seg = *segp; buflen > 0 ; ) {
+		bus_size_t max_sgsize;
+
 		/*
 		 * Get the physical address for this segment.
 		 */
@@ -655,11 +661,15 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
-		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
-		if (sgsize > dmat->maxsegsz)
-			sgsize = dmat->maxsegsz;
-		if (buflen < sgsize)
-			sgsize = buflen;
+		max_sgsize = MIN(buflen, dmat->maxsegsz);
+		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+			sgsize = roundup2(sgsize, dmat->alignment);
+			sgsize = MIN(sgsize, max_sgsize);
+			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+		} else {
+			sgsize = MIN(sgsize, max_sgsize);
+		}
 
 		/*
 		 * Make sure we don't cross any boundaries.
@@ -670,9 +680,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 				sgsize = (baddr - curaddr);
 		}
 
-		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
-			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
-
 		/*
 		 * Insert chunk into a segment, coalescing with
 		 * previous segment if possible.


