Date:      Fri, 23 Aug 2019 22:03:51 +0000 (UTC)
From:      John Baldwin <jhb@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r351439 - in stable/11/sys/amd64: amd64 include pci
Message-ID:  <201908232203.x7NM3pRp005269@repo.freebsd.org>

Author: jhb
Date: Fri Aug 23 22:03:50 2019
New Revision: 351439
URL: https://svnweb.freebsd.org/changeset/base/351439

Log:
  MFC 339432: Do not flush cache for PCIe config window.
  
  Apparently AMD machines cannot tolerate this.  The problem was uncovered
  by r339386, after which the cache flush actually flushed the requested
  range.
  
  Introduce pmap_mapdev_pciecfg(), which behaves like pmap_mapdev() but
  does not flush the cache.  It assumes that the MCFG region was never
  accessed through a cacheable mapping, which is almost certainly true
  if the machine managed to boot at all.
  
  Note that i386 does not need the change, since that architecture
  handles config accesses one page at a time due to the KVA shortage,
  and the page remapping already does not flush the cache.
  
  MFC note: r339386 has not been MFC'd to stable/11, but merging this
  change should still be fine for 11 and reduces conflicts in MFCs of
  other changes.
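
For orientation before the diff: a minimal sketch of how the new interface
is meant to be called, modeled on the pcie_cfgregopen() hunk at the end of
this mail.  The wrapper function below is hypothetical and only illustrates
the intended usage; error handling is omitted.

/*
 * Hypothetical caller, modeled on pcie_cfgregopen() in
 * sys/amd64/pci/pci_cfgreg.c.  Each PCI bus occupies 1 MB of the
 * ECAM/MCFG window, hence the (maxbus + 1) << 20 sizing.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>

static vm_offset_t
map_pcie_cfg_window(vm_paddr_t base, int maxbus)
{

	/*
	 * Same uncacheable (PAT_UNCACHEABLE) mapping as pmap_mapdev(),
	 * but without the trailing cache flush that some AMD machines
	 * cannot tolerate on the MCFG region.
	 */
	return ((vm_offset_t)pmap_mapdev_pciecfg(base, (maxbus + 1) << 20));
}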

Modified:
  stable/11/sys/amd64/amd64/pmap.c
  stable/11/sys/amd64/include/pmap.h
  stable/11/sys/amd64/pci/pci_cfgreg.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c	Fri Aug 23 21:05:37 2019	(r351438)
+++ stable/11/sys/amd64/amd64/pmap.c	Fri Aug 23 22:03:50 2019	(r351439)
@@ -634,7 +634,8 @@ static void	pmap_pvh_free(struct md_page *pvh, pmap_t 
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 		    vm_offset_t va);
 
-static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
+static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode,
+    bool noflush);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
@@ -6834,8 +6835,8 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mas
  * routine is intended to be used for mapping device memory,
  * NOT real memory.
  */
-void *
-pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
+static void *
+pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
 {
 	struct pmap_preinit_mapping *ppim;
 	vm_offset_t va, offset;
@@ -6878,7 +6879,10 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mo
 		 */
 		if (pa < dmaplimit && pa + size <= dmaplimit) {
 			va = PHYS_TO_DMAP(pa);
-			if (!pmap_change_attr(va, size, mode))
+			PMAP_LOCK(kernel_pmap);
+			i = pmap_change_attr_locked(va, size, mode, noflush);
+			PMAP_UNLOCK(kernel_pmap);
+			if (!i)
 				return ((void *)(va + offset));
 		}
 		va = kva_alloc(size);
@@ -6888,22 +6892,37 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mo
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
+	if (!noflush)
+		pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
 	return ((void *)(va + offset));
 }
 
 void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
+{
+
+	return (pmap_mapdev_internal(pa, size, mode, false));
+}
+
+void *
 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, false));
 }
 
 void *
+pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
+{
+
+	return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, true));
+}
+
+void *
 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 {
 
-	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+	return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK, false));
 }
 
 void
@@ -7042,13 +7061,13 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int m
 	int error;
 
 	PMAP_LOCK(kernel_pmap);
-	error = pmap_change_attr_locked(va, size, mode);
+	error = pmap_change_attr_locked(va, size, mode, false);
 	PMAP_UNLOCK(kernel_pmap);
 	return (error);
 }
 
 static int
-pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
+pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
 {
 	vm_offset_t base, offset, tmpva;
 	vm_paddr_t pa_start, pa_end, pa_end1;
@@ -7165,7 +7184,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size
 					/* Run ended, update direct map. */
 					error = pmap_change_attr_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode);
+					    pa_end - pa_start, mode, noflush);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
@@ -7195,7 +7214,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size
 					/* Run ended, update direct map. */
 					error = pmap_change_attr_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode);
+					    pa_end - pa_start, mode, noflush);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
@@ -7223,7 +7242,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size
 					/* Run ended, update direct map. */
 					error = pmap_change_attr_locked(
 					    PHYS_TO_DMAP(pa_start),
-					    pa_end - pa_start, mode);
+					    pa_end - pa_start, mode, noflush);
 					if (error != 0)
 						break;
 					/* Start physical address run. */
@@ -7238,7 +7257,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size
 		pa_end1 = MIN(pa_end, dmaplimit);
 		if (pa_start != pa_end1)
 			error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
-			    pa_end1 - pa_start, mode);
+			    pa_end1 - pa_start, mode, noflush);
 	}
 
 	/*
@@ -7247,7 +7266,8 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva, FALSE);
+		if (!noflush)
+			pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (error);
 }

Modified: stable/11/sys/amd64/include/pmap.h
==============================================================================
--- stable/11/sys/amd64/include/pmap.h	Fri Aug 23 21:05:37 2019	(r351438)
+++ stable/11/sys/amd64/include/pmap.h	Fri Aug 23 22:03:50 2019	(r351439)
@@ -419,6 +419,7 @@ void	pmap_kremove(vm_offset_t);
 void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
+void	*pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size);
 boolean_t pmap_page_is_mapped(vm_page_t m);
 void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 void	pmap_pinit_pml4(vm_page_t);

Modified: stable/11/sys/amd64/pci/pci_cfgreg.c
==============================================================================
--- stable/11/sys/amd64/pci/pci_cfgreg.c	Fri Aug 23 21:05:37 2019	(r351438)
+++ stable/11/sys/amd64/pci/pci_cfgreg.c	Fri Aug 23 22:03:50 2019	(r351439)
@@ -269,7 +269,7 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t
 		    base);
 
 	/* XXX: We should make sure this really fits into the direct map. */
-	pcie_base = (vm_offset_t)pmap_mapdev(base, (maxbus + 1) << 20);
+	pcie_base = (vm_offset_t)pmap_mapdev_pciecfg(base, (maxbus + 1) << 20);
 	pcie_minbus = minbus;
 	pcie_maxbus = maxbus;
 	cfgmech = CFGMECH_PCIE;

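For readers unfamiliar with the ECAM layout behind the (maxbus + 1) << 20
window size in the hunk above: each bus occupies 1 MB, each device 32 KB,
and each function 4 KB of the window.  The helper below is a hypothetical
sketch of that standard offset calculation, not a reproduction of the
lookup code in pci_cfgreg.c.

#include <sys/types.h>

/*
 * Standard PCIe ECAM offset: bus in bits 27:20, device (slot) in
 * bits 19:15, function in bits 14:12, register in bits 11:0.
 */
static inline uint64_t
ecam_offset(int bus, int slot, int func, int reg)
{

	return (((uint64_t)(bus & 0xff) << 20) | ((slot & 0x1f) << 15) |
	    ((func & 0x7) << 12) | (reg & 0xfff));
}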

