Date:      Sun, 10 Nov 2019 09:41:29 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r354591 - head/sys/amd64/amd64
Message-ID:  <201911100941.xAA9fTON072587@repo.freebsd.org>

Author: kib
Date: Sun Nov 10 09:41:29 2019
New Revision: 354591
URL: https://svnweb.freebsd.org/changeset/base/354591

Log:
  amd64: Change SFENCE to locked op for synchronizing with CLFLUSHOPT on Intel.
  
  Reviewed by:	cem, jhb
  Discussed with:	alc, scottph
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week
  Differential revision:	https://reviews.freebsd.org/D22007

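[Editor's note: the swap is from sfence() to atomic_thread_fence_seq_cst()
around the CLFLUSHOPT/CLWB loops.  Per the Intel SDM, CLFLUSHOPT executions
are ordered by fence instructions and by locked read-modify-write
instructions alike, so a locked op is a correct replacement for SFENCE
here.  As an illustrative sketch only -- not the in-tree implementation,
which lives in sys/amd64/include/atomic.h -- a seq-cst fence on amd64 can
be built from a locked add to a dummy location:

	static inline void
	fence_seq_cst_sketch(void)
	{
		unsigned int dummy = 0;

		/* Any LOCK-prefixed read-modify-write is a full barrier
		   on amd64; the add of zero is a no-op otherwise. */
		__asm__ __volatile__("lock; addl $0,%0"
		    : "+m" (dummy) : : "memory", "cc");
	}
]
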
Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Sun Nov 10 09:32:17 2019	(r354590)
+++ head/sys/amd64/amd64/pmap.c	Sun Nov 10 09:41:29 2019	(r354591)
@@ -3053,16 +3053,16 @@ pmap_force_invalidate_cache_range(vm_offset_t sva, vm_
 
 	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
 		/*
-		 * Do per-cache line flush.  Use the sfence
+		 * Do per-cache line flush.  Use a locked
 		 * instruction to insure that previous stores are
 		 * included in the write-back.  The processor
 		 * propagates flush to other processors in the cache
 		 * coherence domain.
 		 */
-		sfence();
+		atomic_thread_fence_seq_cst();
 		for (; sva < eva; sva += cpu_clflush_line_size)
 			clflushopt(sva);
-		sfence();
+		atomic_thread_fence_seq_cst();
 	} else {
 		/*
 		 * Writes are ordered by CLFLUSH on Intel CPUs.
@@ -3104,7 +3104,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int coun
 		pmap_invalidate_cache();
 	else {
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 		for (i = 0; i < count; i++) {
@@ -3118,7 +3118,7 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int coun
 			}
 		}
 		if (useclflushopt)
-			sfence();
+			atomic_thread_fence_seq_cst();
 		else if (cpu_vendor_id != CPU_VENDOR_INTEL)
 			mfence();
 	}
@@ -3139,10 +3139,10 @@ pmap_flush_cache_range(vm_offset_t sva, vm_offset_t ev
 	if (pmap_kextract(sva) == lapic_paddr)
 		return;
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 	for (; sva < eva; sva += cpu_clflush_line_size)
 		clwb(sva);
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 void
@@ -3175,7 +3175,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t
 		sched_pin();
 		pte_store(pte, spa | pte_bits);
 		invlpg(vaddr);
-		/* XXXKIB sfences inside flush_cache_range are excessive */
+		/* XXXKIB atomics inside flush_cache_range are excessive */
 		pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
 		sched_unpin();
 	}
@@ -9504,10 +9504,10 @@ pmap_large_map_wb_fence_mfence(void)
 }
 
 static void
-pmap_large_map_wb_fence_sfence(void)
+pmap_large_map_wb_fence_atomic(void)
 {
 
-	sfence();
+	atomic_thread_fence_seq_cst();
 }
 
 static void
@@ -9522,7 +9522,7 @@ DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (v
 		return (pmap_large_map_wb_fence_mfence);
 	else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
 	    CPUID_STDEXT_CLFLUSHOPT)) == 0)
-		return (pmap_large_map_wb_fence_sfence);
+		return (pmap_large_map_wb_fence_atomic);
 	else
 		/* clflush is strongly enough ordered */
 		return (pmap_large_map_wb_fence_nop);

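[Editor's note: the pmap hunks above all follow the same shape: fence so
that earlier stores are included in the write-back, flush each cache line
in the range with CLFLUSHOPT (or CLWB), then fence again so the flushes
complete before returning.  Plain CLFLUSH needs no fences on Intel CPUs,
while non-Intel CPUs get mfence(), as the pmap_invalidate_cache_pages hunk
shows.  A hedged userland sketch of the pattern, assuming a 64-byte flush
line (the kernel reads the real size into cpu_clflush_line_size) and a
line-aligned range:

	#include <immintrin.h>	/* _mm_clflushopt(); build with -mclflushopt */
	#include <stdatomic.h>

	#define	FLUSH_LINE	64	/* assumption; not the kernel's variable */

	/* Write back every cache line in [start, end), bracketed by
	   full fences, mirroring pmap_force_invalidate_cache_range(). */
	static void
	flush_range_sketch(char *start, char *end)
	{
		atomic_thread_fence(memory_order_seq_cst);
		for (; start < end; start += FLUSH_LINE)
			_mm_clflushopt(start);
		atomic_thread_fence(memory_order_seq_cst);
	}
]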

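[Editor's note: the last hunk only renames the sfence-based helper; the
surrounding DEFINE_IFUNC resolver, which picks one fence implementation at
resolution time from the CPU feature bits, is unchanged.  Below is a
kernel-context sketch of the same selection using a plain function pointer,
reusing only symbols visible in the diff.  The first branch's condition
falls outside the hunk, so the non-Intel vendor check is an assumption
inferred from the mfence() uses earlier in the diff:

	/* Fence helpers as named in the diff; mfence(), cpu_vendor_id and
	   cpu_stdext_feature are existing kernel symbols. */
	static void pmap_large_map_wb_fence_mfence(void) { mfence(); }
	static void pmap_large_map_wb_fence_atomic(void)
	{ atomic_thread_fence_seq_cst(); }
	static void pmap_large_map_wb_fence_nop(void) { }

	static void (*pmap_large_map_wb_fence)(void);

	static void
	pmap_large_map_wb_fence_resolve(void)
	{
		if (cpu_vendor_id != CPU_VENDOR_INTEL)	/* assumed branch */
			pmap_large_map_wb_fence =
			    pmap_large_map_wb_fence_mfence;
		else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
		    CPUID_STDEXT_CLFLUSHOPT)) == 0)
			pmap_large_map_wb_fence =
			    pmap_large_map_wb_fence_atomic;
		else
			/* clflush is strongly enough ordered (per the diff) */
			pmap_large_map_wb_fence =
			    pmap_large_map_wb_fence_nop;
	}
]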
