Date:      Mon, 18 Nov 2019 13:37:13 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r354810 - stable/12/sys/amd64/amd64
Message-ID:  <201911181337.xAIDbDVn068728@repo.freebsd.org>

Author: kib
Date: Mon Nov 18 13:37:13 2019
New Revision: 354810
URL: https://svnweb.freebsd.org/changeset/base/354810

Log:
  MFC r354591:
  amd64: Change SFENCE to locked op for synchronizing with CLFLUSHOPT on Intel.
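
  The pattern introduced here brackets a loop of CLFLUSHOPT
  instructions with atomic_thread_fence_seq_cst(), which the amd64
  kernel implements as a locked read-modify-write; like SFENCE and
  MFENCE, a locked operation orders CLFLUSHOPT with respect to
  surrounding stores, which is what the change relies on.  A minimal
  user-space sketch of the same pattern, assuming a 64-byte cache
  line (flush_range and CACHE_LINE are illustrative names, and the
  C11 fence below typically compiles to MFENCE rather than a locked
  op; none of this is part of the commit itself):

	#include <stddef.h>
	#include <stdatomic.h>
	#include <immintrin.h>	/* _mm_clflushopt(); build with -mclflushopt */

	#define	CACHE_LINE	64	/* assumed cache-line size */

	/* Write back and invalidate [buf, buf + len), one line at a time. */
	static void
	flush_range(void *buf, size_t len)
	{
		char *p = buf;
		char *end = p + len;

		/* Order earlier stores before the flushes. */
		atomic_thread_fence(memory_order_seq_cst);
		for (; p < end; p += CACHE_LINE)
			_mm_clflushopt(p);
		/* Order the flushes before whatever follows. */
		atomic_thread_fence(memory_order_seq_cst);
	}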

Modified:
  stable/12/sys/amd64/amd64/pmap.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/12/sys/amd64/amd64/pmap.c	Mon Nov 18 13:34:27 2019	(r354809)
+++ stable/12/sys/amd64/amd64/pmap.c	Mon Nov 18 13:37:13 2019	(r354810)
@@ -2938,16 +2938,16 @@ pmap_force_invalidate_cache_range(vm_offset_t sva, vm_
 
 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
 		/*
-		 * Do per-cache line flush.  Use the sfence
+		 * Do per-cache line flush.  Use a locked
 		 * instruction to ensure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates the flush to other processors in the cache
 		 * coherence domain.
 		 */
-		sfence();
+		atomic_thread_fence_seq_cst();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		sfence();
+		atomic_thread_fence_seq_cst();
 	} else {
 		/*
 		 * Writes are ordered by CLFLUSH on Intel CPUs.
@@ -2989,7 +2989,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int coun
 		pmap_invalidate_cache();
 	else {
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
@@ -3003,7 +3003,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int coun
 			}
 		}
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
@@ -3024,10 +3024,10 @@ pmap_flush_cache_range(vm_offset_t sva, vm_offset_t ev
 	if (pmap_kextract(sva) == lapic_paddr)
 		return;
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 	for (; sva < eva; sva += cpu_clflush_line_size)
 		clwb(sva);
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 void
@@ -3060,7 +3060,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t
 		sched_pin();
 		pte_store(pte, spa | pte_bits);
 		invlpg(vaddr);
-		/* XXXKIB sfences inside flush_cache_range are excessive */
+		/* XXXKIB atomic fences inside flush_cache_range are excessive */
 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 		sched_unpin();
 	}
@@ -9286,10 +9286,10 @@ pmap_large_map_wb_fence_mfence(void)
 }
 
 static void
-pmap_large_map_wb_fence_sfence(void)
+pmap_large_map_wb_fence_atomic(void)
 {
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 static void
@@ -9304,7 +9304,7 @@ DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (v
 		return (pmap_large_map_wb_fence_mfence);
 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
-		return (pmap_large_map_wb_fence_sfence);
+		return (pmap_large_map_wb_fence_atomic);
 	else
 		/* clflush is sufficiently strongly ordered */
 		return (pmap_large_map_wb_fence_nop);
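
For the last hunk, DEFINE_IFUNC installs a resolver that runs once at
boot and returns the fence implementation matching the CPU, so every
later call is a direct call with no per-call branching.  A plain
function-pointer sketch of the same dispatch, mirroring the branches
shown above (the wb_fence_* names and the boolean arguments are
illustrative, not kernel API):

	#include <stdbool.h>

	typedef void (*wb_fence_fn)(void);

	static void wb_fence_mfence(void) { /* mfence() in the kernel */ }
	static void wb_fence_atomic(void) { /* atomic_thread_fence_seq_cst() */ }
	static void wb_fence_nop(void)    { /* nothing needed */ }

	/* Resolve once at startup, then call through the pointer. */
	static wb_fence_fn
	resolve_wb_fence(bool is_intel, bool has_clwb_or_clflushopt)
	{
		if (!is_intel)
			return (wb_fence_mfence);
		else if (!has_clwb_or_clflushopt)
			return (wb_fence_atomic);
		else
			return (wb_fence_nop);
	}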


