Skip site navigation (1)Skip section navigation (2)
Date:      Fri, 15 Apr 2016 09:21:51 +0000 (UTC)
From:      Roger Pau Monné <royger@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r298044 - head/sys/x86/x86
Message-ID:  <201604150921.u3F9Lp3a061731@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: royger
Date: Fri Apr 15 09:21:50 2016
New Revision: 298044
URL: https://svnweb.freebsd.org/changeset/base/298044

Log:
  busdma/bounce: revert r292255
  
  Revert r292255 because it can create bounced regions without contiguous
  page offsets, which are needed for USB devices.
  
  Another solution would be to force bouncing the full buffer always (even
  when only one page requires bouncing), but this seems overly complicated and
  unnecessary, and it will probably involve using more bounce pages than the
  current code.
  
  Reported by: phk

Modified:
  head/sys/x86/x86/busdma_bounce.c

Modified: head/sys/x86/x86/busdma_bounce.c
==============================================================================
--- head/sys/x86/x86/busdma_bounce.c	Fri Apr 15 09:13:01 2016	(r298043)
+++ head/sys/x86/x86/busdma_bounce.c	Fri Apr 15 09:21:50 2016	(r298044)
@@ -476,7 +476,8 @@ _bus_dmamap_count_phys(bus_dma_tag_t dma
 		while (buflen != 0) {
 			sgsize = MIN(buflen, dmat->common.maxsegsz);
 			if (bus_dma_run_filter(&dmat->common, curaddr)) {
-				sgsize = MIN(PAGE_SIZE, sgsize);
+				sgsize = MIN(sgsize,
+				    PAGE_SIZE - (curaddr & PAGE_MASK));
 				map->pagesneeded++;
 			}
 			curaddr += sgsize;
@@ -516,7 +517,8 @@ _bus_dmamap_count_pages(bus_dma_tag_t dm
 			else
 				paddr = pmap_extract(pmap, vaddr);
 			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = PAGE_SIZE;
+				sg_len = roundup2(sg_len,
+				    dmat->common.alignment);
 				map->pagesneeded++;
 			}
 			vaddr += sg_len;
@@ -552,7 +554,9 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat,
 			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
 			sg_len = MIN(sg_len, max_sgsize);
 			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
-				sg_len = MIN(PAGE_SIZE, max_sgsize);
+				sg_len = roundup2(sg_len,
+				    dmat->common.alignment);
+				sg_len = MIN(sg_len, max_sgsize);
 				KASSERT((sg_len & (dmat->common.alignment - 1))
 				    == 0, ("Segment size is not aligned"));
 				map->pagesneeded++;
@@ -648,7 +652,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_
     int *segp)
 {
 	bus_size_t sgsize;
-	bus_addr_t curaddr, nextaddr;
+	bus_addr_t curaddr;
 	int error;
 
 	if (map == NULL)
@@ -672,12 +676,9 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
-			nextaddr = 0;
-			sgsize = MIN(PAGE_SIZE, sgsize);
-			if ((curaddr & PAGE_MASK) + sgsize > PAGE_SIZE)
-				nextaddr = roundup2(curaddr, PAGE_SIZE);
-			curaddr = add_bounce_page(dmat, map, 0, curaddr,
-			    nextaddr, sgsize);
+			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
+			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
+			    sgsize);
 		}
 		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
 		    segp);
@@ -743,7 +744,8 @@ bounce_bus_dmamap_load_buffer(bus_dma_ta
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, curaddr)) {
-			sgsize = MIN(PAGE_SIZE, max_sgsize);
+			sgsize = roundup2(sgsize, dmat->common.alignment);
+			sgsize = MIN(sgsize, max_sgsize);
 			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
 			    sgsize);
 		} else {
@@ -772,6 +774,17 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t 
 	int error, page_index;
 	bus_size_t sgsize, max_sgsize;
 
+	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+		/*
+		 * If we have to keep the offset of each page this function
+		 * is not suitable, switch back to bus_dmamap_load_ma_triv
+		 * which is going to do the right thing in this case.
+		 */
+		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
+		    flags, segs, segp);
+		return (error);
+	}
+
 	if (map == NULL)
 		map = &nobounce_dmamap;
 
@@ -798,7 +811,10 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t 
 		if (((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
 		    map->pagesneeded != 0 &&
 		    bus_dma_run_filter(&dmat->common, paddr)) {
-			sgsize = MIN(PAGE_SIZE, max_sgsize);
+			sgsize = roundup2(sgsize, dmat->common.alignment);
+			sgsize = MIN(sgsize, max_sgsize);
+			KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+			    ("Segment size is not aligned"));
 			/*
 			 * Check if two pages of the user provided buffer
 			 * are used.
@@ -1159,6 +1175,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_
 	bz->active_bpages++;
 	mtx_unlock(&bounce_lock);
 
+	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+		/* Page offset needs to be preserved. */
+		bpage->vaddr |= addr1 & PAGE_MASK;
+		bpage->busaddr |= addr1 & PAGE_MASK;
+		KASSERT(addr2 == 0,
+	("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
+	}
 	bpage->datavaddr = vaddr;
 	bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
 	KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
@@ -1178,6 +1201,15 @@ free_bounce_page(bus_dma_tag_t dmat, str
 	bz = dmat->bounce_zone;
 	bpage->datavaddr = 0;
 	bpage->datacount = 0;
+	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
+		/*
+		 * Reset the bounce page to start at offset 0.  Other uses
+		 * of this bounce page may need to store a full page of
+		 * data and/or assume it starts on a page boundary.
+		 */
+		bpage->vaddr &= ~PAGE_MASK;
+		bpage->busaddr &= ~PAGE_MASK;
+	}
 
 	mtx_lock(&bounce_lock);
 	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201604150921.u3F9Lp3a061731>