Skip site navigation (1)Skip section navigation (2)
Date:      Sun, 31 Dec 2017 03:06:29 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r327404 - in stable/11/sys: kern vm
Message-ID:  <201712310306.vBV36TcH086521@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: mjg
Date: Sun Dec 31 03:06:29 2017
New Revision: 327404
URL: https://svnweb.freebsd.org/changeset/base/327404

Log:
  MFC r323234,r323305,r323306,r324044:
  
      Start annotating global _padalign locks with __exclusive_cache_line
  
      While these locks are guaranteed to not share their respective cache lines,
      their current placement leaves unnecessary holes in lines which preceded them.
  
      For instance the annotation of vm_page_queue_free_mtx allows 2 neighbouring
      cachelines (previously separated by the lock) to be collapsed into 1.
  
      The annotation is only effective on architectures which have it implemented in
      their linker script (currently only amd64). Thus locks are not converted to
      their not-padaligned variants so as not to affect the rest.
  
  =============
  
      Annotate global process locks with __exclusive_cache_line
  
  =============
  
      Annotate Giant with __exclusive_cache_line
  
  =============
  
      Annotate sysctlmemlock with __exclusive_cache_line.

Modified:
  stable/11/sys/kern/kern_proc.c
  stable/11/sys/kern/kern_sysctl.c
  stable/11/sys/kern/subr_vmem.c
  stable/11/sys/kern/vfs_bio.c
  stable/11/sys/vm/uma_core.c
  stable/11/sys/vm/vm_page.c
  stable/11/sys/vm/vm_pager.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/kern_proc.c
==============================================================================
--- stable/11/sys/kern/kern_proc.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/kern/kern_proc.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -131,9 +131,9 @@ struct pgrphashhead *pgrphashtbl;
 u_long pgrphash;
 struct proclist allproc;
 struct proclist zombproc;
-struct sx allproc_lock;
-struct sx proctree_lock;
-struct mtx ppeers_lock;
+struct sx __exclusive_cache_line allproc_lock;
+struct sx __exclusive_cache_line proctree_lock;
+struct mtx __exclusive_cache_line ppeers_lock;
 uma_zone_t proc_zone;
 
 /*

Modified: stable/11/sys/kern/kern_sysctl.c
==============================================================================
--- stable/11/sys/kern/kern_sysctl.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/kern/kern_sysctl.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -88,7 +88,7 @@ static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl
  * sysctl requests larger than a single page via an exclusive lock.
  */
 static struct rmlock sysctllock;
-static struct sx sysctlmemlock;
+static struct sx __exclusive_cache_line sysctlmemlock;
 
 #define	SYSCTL_WLOCK()		rm_wlock(&sysctllock)
 #define	SYSCTL_WUNLOCK()	rm_wunlock(&sysctllock)

Modified: stable/11/sys/kern/subr_vmem.c
==============================================================================
--- stable/11/sys/kern/subr_vmem.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/kern/subr_vmem.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -181,7 +181,7 @@ static struct callout	vmem_periodic_ch;
 static int		vmem_periodic_interval;
 static struct task	vmem_periodic_wk;
 
-static struct mtx_padalign vmem_list_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
 
 /* ---- misc */
@@ -580,7 +580,7 @@ qc_drain(vmem_t *vm)
 
 #ifndef UMA_MD_SMALL_ALLOC
 
-static struct mtx_padalign vmem_bt_lock;
+static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
 
 /*
  * vmem_bt_alloc:  Allocate a new page of boundary tags.

Modified: stable/11/sys/kern/vfs_bio.c
==============================================================================
--- stable/11/sys/kern/vfs_bio.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/kern/vfs_bio.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -253,23 +253,23 @@ SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN
 /*
  * This lock synchronizes access to bd_request.
  */
-static struct mtx_padalign bdlock;
+static struct mtx_padalign __exclusive_cache_line bdlock;
 
 /*
  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
  * waitrunningbufspace().
  */
-static struct mtx_padalign rbreqlock;
+static struct mtx_padalign __exclusive_cache_line rbreqlock;
 
 /*
  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  */
-static struct rwlock_padalign nblock;
+static struct rwlock_padalign __exclusive_cache_line nblock;
 
 /*
  * Lock that protects bdirtywait.
  */
-static struct mtx_padalign bdirtylock;
+static struct mtx_padalign __exclusive_cache_line bdirtylock;
 
 /*
  * Wakeup point for bufdaemon, as well as indicator of whether it is already
@@ -348,7 +348,7 @@ static int bq_len[BUFFER_QUEUES];
 /*
  * Lock for each bufqueue
  */
-static struct mtx_padalign bqlocks[BUFFER_QUEUES];
+static struct mtx_padalign __exclusive_cache_line bqlocks[BUFFER_QUEUES];
 
 /*
  * per-cpu empty buffer cache.

Modified: stable/11/sys/vm/uma_core.c
==============================================================================
--- stable/11/sys/vm/uma_core.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/vm/uma_core.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -138,7 +138,7 @@ static LIST_HEAD(,uma_zone) uma_cachezones =
     LIST_HEAD_INITIALIZER(uma_cachezones);
 
 /* This RW lock protects the keg list */
-static struct rwlock_padalign uma_rwlock;
+static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
 
 /* Linked list of boot time pages */
 static LIST_HEAD(,uma_slab) uma_boot_pages =

Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/vm/vm_page.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -127,9 +127,9 @@ __FBSDID("$FreeBSD$");
  */
 
 struct vm_domain vm_dom[MAXMEMDOM];
-struct mtx_padalign vm_page_queue_free_mtx;
+struct mtx_padalign __exclusive_cache_line vm_page_queue_free_mtx;
 
-struct mtx_padalign pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
 
 vm_page_t vm_page_array;
 long vm_page_array_size;

Modified: stable/11/sys/vm/vm_pager.c
==============================================================================
--- stable/11/sys/vm/vm_pager.c	Sun Dec 31 02:48:16 2017	(r327403)
+++ stable/11/sys/vm/vm_pager.c	Sun Dec 31 03:06:29 2017	(r327404)
@@ -165,7 +165,7 @@ struct pagerops *pagertab[] = {
  * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
  * (MAXPHYS == 64k) if you want to get the most efficiency.
  */
-struct mtx_padalign pbuf_mtx;
+struct mtx_padalign __exclusive_cache_line pbuf_mtx;
 static TAILQ_HEAD(swqueue, buf) bswlist;
 static int bswneeded;
 vm_offset_t swapbkva;		/* swap buffers kva */



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201712310306.vBV36TcH086521>