Date:      Sun, 4 Dec 2016 21:13:27 +0000 (UTC)
From:      Jason Evans <jasone@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r309540 - in head/contrib/jemalloc: . doc include/jemalloc include/jemalloc/internal src
Message-ID:  <201612042113.uB4LDRO2006973@repo.freebsd.org>

Author: jasone
Date: Sun Dec  4 21:13:26 2016
New Revision: 309540
URL: https://svnweb.freebsd.org/changeset/base/309540

Log:
  Update jemalloc to 4.4.0.

Modified:
  head/contrib/jemalloc/ChangeLog
  head/contrib/jemalloc/FREEBSD-diffs
  head/contrib/jemalloc/VERSION
  head/contrib/jemalloc/doc/jemalloc.3
  head/contrib/jemalloc/include/jemalloc/internal/arena.h
  head/contrib/jemalloc/include/jemalloc/internal/chunk.h
  head/contrib/jemalloc/include/jemalloc/internal/extent.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
  head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
  head/contrib/jemalloc/include/jemalloc/internal/pages.h
  head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
  head/contrib/jemalloc/include/jemalloc/internal/stats.h
  head/contrib/jemalloc/include/jemalloc/internal/util.h
  head/contrib/jemalloc/include/jemalloc/internal/valgrind.h
  head/contrib/jemalloc/include/jemalloc/jemalloc.h
  head/contrib/jemalloc/src/arena.c
  head/contrib/jemalloc/src/base.c
  head/contrib/jemalloc/src/chunk.c
  head/contrib/jemalloc/src/chunk_dss.c
  head/contrib/jemalloc/src/extent.c
  head/contrib/jemalloc/src/huge.c
  head/contrib/jemalloc/src/jemalloc.c
  head/contrib/jemalloc/src/pages.c
  head/contrib/jemalloc/src/stats.c
  head/contrib/jemalloc/src/tcache.c
  head/contrib/jemalloc/src/util.c

Modified: head/contrib/jemalloc/ChangeLog
==============================================================================
--- head/contrib/jemalloc/ChangeLog	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/ChangeLog	Sun Dec  4 21:13:26 2016	(r309540)
@@ -4,6 +4,33 @@ brevity.  Much more detail can be found 
 
     https://github.com/jemalloc/jemalloc
 
+* 4.4.0 (December 3, 2016)
+
+  New features:
+  - Add configure support for *-*-linux-android.  (@cferris1000, @jasone)
+  - Add the --disable-syscall configure option, for use on systems that place
+    security-motivated limitations on syscall(2).  (@jasone)
+  - Add support for Debian GNU/kFreeBSD.  (@thesam)
+
+  Optimizations:
+  - Add extent serial numbers and use them where appropriate as a sort key that
+    is higher priority than address, so that the allocation policy prefers older
+    extents.  This tends to improve locality (decrease fragmentation) when
+    memory grows downward.  (@jasone)
+  - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
+    on Linux 4.5 and newer.  (@jasone)
+  - Mark partially purged arena chunks as non-huge-page.  This improves
+    interaction with Linux's transparent huge page functionality.  (@jasone)
+
+  Bug fixes:
+  - Fix size class computations for edge conditions involving extremely large
+    allocations.  This regression was first released in 4.0.0.  (@jasone,
+    @ingvarha)
+  - Remove overly restrictive assertions related to the cactive statistic.  This
+    regression was first released in 4.1.0.  (@jasone)
+  - Implement a more reliable detection scheme for os_unfair_lock on macOS.
+    (@jszakmeister)
+
 * 4.3.1 (November 7, 2016)
 
   Bug fixes:
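
About the extent serial number optimization above: the new comparators order
extents by size, then serial number, then address, so that among equally
sized extents the allocator prefers the oldest one.  The src/extent.c changes
fall past the truncation point of this diff, so the following is a hedged
reconstruction of the comparator's shape (see arena_sn_comp()/arena_snad_comp()
in the src/arena.c hunks below for the committed analogue), not the committed
code:

static int
extent_szsnad_comp_sketch(const extent_node_t *a, const extent_node_t *b)
{
	size_t a_size, b_size, a_sn, b_sn;
	uintptr_t a_addr, b_addr;
	int ret;

	/* Primary key: size. */
	a_size = extent_node_size_get(a);
	b_size = extent_node_size_get(b);
	ret = (a_size > b_size) - (a_size < b_size);
	if (ret != 0)
		return (ret);

	/* Secondary key: serial number, so older extents sort first. */
	a_sn = extent_node_sn_get(a);
	b_sn = extent_node_sn_get(b);
	ret = (a_sn > b_sn) - (a_sn < b_sn);
	if (ret != 0)
		return (ret);

	/* Tertiary key: address, giving a stable order when sns collide. */
	a_addr = (uintptr_t)extent_node_addr_get(a);
	b_addr = (uintptr_t)extent_node_addr_get(b);
	return ((a_addr > b_addr) - (a_addr < b_addr));
}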

Modified: head/contrib/jemalloc/FREEBSD-diffs
==============================================================================
--- head/contrib/jemalloc/FREEBSD-diffs	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/FREEBSD-diffs	Sun Dec  4 21:13:26 2016	(r309540)
@@ -1,5 +1,5 @@
 diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 3d2e721..b361db2 100644
+index d9c8345..9898c3c 100644
 --- a/doc/jemalloc.xml.in
 +++ b/doc/jemalloc.xml.in
 @@ -53,11 +53,23 @@
@@ -47,10 +47,10 @@ index 3d2e721..b361db2 100644
 +  </refsect1>
  </refentry>
 diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
-index f39ce54..a3ba55d 100644
+index ce4e602..35360b6 100644
 --- a/include/jemalloc/internal/arena.h
 +++ b/include/jemalloc/internal/arena.h
-@@ -719,8 +719,13 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+@@ -730,8 +730,13 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
  JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
  arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
  {
@@ -64,7 +64,7 @@ index f39ce54..a3ba55d 100644
  }
  
  JEMALLOC_ALWAYS_INLINE size_t
-@@ -779,8 +784,13 @@ arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+@@ -790,8 +795,13 @@ arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
  JEMALLOC_ALWAYS_INLINE const size_t *
  arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
  {
@@ -79,7 +79,7 @@ index f39ce54..a3ba55d 100644
  
  JEMALLOC_ALWAYS_INLINE size_t
 diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index fdc8fef..56a35a4 100644
+index e7ace7d..d86c61d 100644
 --- a/include/jemalloc/internal/jemalloc_internal.h.in
 +++ b/include/jemalloc/internal/jemalloc_internal.h.in
 @@ -8,6 +8,9 @@
@@ -144,10 +144,10 @@ index b442d2d..76518db 100644
  
  #endif /* JEMALLOC_H_EXTERNS */
 diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 87c8c9b..df576f6 100644
+index c1c6c40..c6395fd 100644
 --- a/include/jemalloc/internal/private_symbols.txt
 +++ b/include/jemalloc/internal/private_symbols.txt
-@@ -307,7 +307,6 @@ iralloct_realign
+@@ -310,7 +310,6 @@ iralloct_realign
  isalloc
  isdalloct
  isqalloc
@@ -335,7 +335,7 @@ index f943891..47d032c 100755
 +#include "jemalloc_FreeBSD.h"
  EOF
 diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 38650ff..f659b55 100644
+index baead66..8a49f26 100644
 --- a/src/jemalloc.c
 +++ b/src/jemalloc.c
 @@ -4,6 +4,10 @@
@@ -349,7 +349,7 @@ index 38650ff..f659b55 100644
  /* Runtime configuration options. */
  const char	*je_malloc_conf
  #ifndef _WIN32
-@@ -2756,6 +2760,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+@@ -2775,6 +2779,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
   */
  /******************************************************************************/
  /*
@@ -457,7 +457,7 @@ index 38650ff..f659b55 100644
   * The following functions are used by threading libraries for protection of
   * malloc during fork().
   */
-@@ -2894,4 +2999,11 @@ jemalloc_postfork_child(void)
+@@ -2913,4 +3018,11 @@ jemalloc_postfork_child(void)
  	ctl_postfork_child(tsd_tsdn(tsd));
  }
  
@@ -516,7 +516,7 @@ index 6333e73..13f8d79 100644
 +#endif
 +}
 diff --git a/src/util.c b/src/util.c
-index 7905267..bee1c77 100644
+index dd8c236..a4ff287 100755
 --- a/src/util.c
 +++ b/src/util.c
 @@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)

Modified: head/contrib/jemalloc/VERSION
==============================================================================
--- head/contrib/jemalloc/VERSION	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/VERSION	Sun Dec  4 21:13:26 2016	(r309540)
@@ -1 +1 @@
-4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
+4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc

Modified: head/contrib/jemalloc/doc/jemalloc.3
==============================================================================
--- head/contrib/jemalloc/doc/jemalloc.3	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/doc/jemalloc.3	Sun Dec  4 21:13:26 2016	(r309540)
@@ -2,12 +2,12 @@
 .\"     Title: JEMALLOC
 .\"    Author: Jason Evans
 .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>;
-.\"      Date: 11/08/2016
+.\"      Date: 12/04/2016
 .\"    Manual: User Manual
-.\"    Source: jemalloc 4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
+.\"    Source: jemalloc 4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc
 .\"  Language: English
 .\"
-.TH "JEMALLOC" "3" "11/08/2016" "jemalloc 4.3.1-0-g0110fa8451af" "User Manual"
+.TH "JEMALLOC" "3" "12/04/2016" "jemalloc 4.4.0-0-gf1f76357313e" "User Manual"
 .\" -----------------------------------------------------------------
 .\" * Define some portability stuff
 .\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
 jemalloc \- general purpose memory allocation functions
 .SH "LIBRARY"
 .PP
-This manual describes jemalloc 4\&.3\&.1\-0\-g0110fa8451af905affd77c3bea0d545fee2251b2\&. More information can be found at the
+This manual describes jemalloc 4\&.4\&.0\-0\-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc\&. More information can be found at the
 \m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
 .PP
 The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -365,7 +365,7 @@ for (i = 0; i < nbins; i++) {
 
 	mib[2] = i;
 	len = sizeof(bin_size);
-	mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
+	mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
 	/* Do something with bin_size\&.\&.\&. */
 }
 .fi
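
The fragment above starts mid-loop; a self-contained version of the man
page's example, using only the documented mallctl*() API (declared in
<malloc_np.h> on FreeBSD), looks like this:

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	unsigned nbins, i;
	size_t mib[4], miblen, len;

	len = sizeof(nbins);
	if (mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0) != 0)
		return (1);

	miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0)
		return (1);
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i;
		len = sizeof(bin_size);
		mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
		printf("bin %u size: %zu\n", i, bin_size);
	}
	return (0);
}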

Modified: head/contrib/jemalloc/include/jemalloc/internal/arena.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/arena.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/arena.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -191,6 +191,14 @@ struct arena_chunk_s {
 	extent_node_t		node;
 
 	/*
+	 * True if memory could be backed by transparent huge pages.  This is
+	 * only directly relevant to Linux, since it is the only supported
+	 * platform on which jemalloc interacts with explicit transparent huge
+	 * page controls.
+	 */
+	bool			hugepage;
+
+	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  The
 	 * first map_bias entries are omitted, since the chunk header does not
 	 * need to be tracked in the map.  This omission saves a header page
@@ -374,10 +382,12 @@ struct arena_s {
 
 	dss_prec_t		dss_prec;
 
-
 	/* Extant arena chunks. */
 	ql_head(extent_node_t)	achunks;
 
+	/* Extent serial number generator state. */
+	size_t			extent_sn_next;
+
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -453,9 +463,9 @@ struct arena_s {
 	 * orderings are needed, which is why there are two trees with the same
 	 * contents.
 	 */
-	extent_tree_t		chunks_szad_cached;
+	extent_tree_t		chunks_szsnad_cached;
 	extent_tree_t		chunks_ad_cached;
-	extent_tree_t		chunks_szad_retained;
+	extent_tree_t		chunks_szsnad_retained;
 	extent_tree_t		chunks_ad_retained;
 
 	malloc_mutex_t		chunks_mtx;
@@ -522,13 +532,13 @@ void	arena_chunk_cache_maybe_remove(aren
 extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
 void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
 void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero);
+    size_t alignment, size_t *sn, bool *zero);
 void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t usize);
+    size_t usize, size_t sn);
 void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize);
 void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
+    void *chunk, size_t oldsize, size_t usize, size_t sn);
 bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize, bool *zero);
 ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
@@ -601,6 +611,7 @@ void	arena_stats_merge(tsdn_t *tsdn, are
 unsigned	arena_nthreads_get(arena_t *arena, bool internal);
 void	arena_nthreads_inc(arena_t *arena, bool internal);
 void	arena_nthreads_dec(arena_t *arena, bool internal);
+size_t	arena_extent_sn_next(arena_t *arena);
 arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
 void	arena_boot(void);
 void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
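
The hugepage flag added above works together with the pages_huge()/
pages_nohuge() calls that appear later in this diff: a fresh chunk starts
out THP-eligible, the first purge within the chunk flips the flag and calls
pages_nohuge() (the relevant arena_purge_stashed() hunk is cut off by the
truncation below), and arena_chunk_discard() calls pages_huge() to restore
the default before the chunk is recycled.  A condensed, compilable sketch of
that lifecycle, with hypothetical stand-ins for the real types:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins, for illustration only. */
typedef struct { bool hugepage; } arena_chunk_t;
extern bool pages_huge(void *addr, size_t size);
extern bool pages_nohuge(void *addr, size_t size);
static const size_t chunksize = 2 * 1024 * 1024;

/* First purge within a chunk: keep THP from re-inflating purged pages. */
static void
chunk_first_purge(arena_chunk_t *chunk)
{
	if (chunk->hugepage) {
		chunk->hugepage = false;
		pages_nohuge(chunk, chunksize);
	}
}

/* Chunk discard: restore the default so recycled chunks can be THP-backed. */
static void
chunk_discard(arena_chunk_t *chunk)
{
	if (!chunk->hugepage)
		pages_huge(chunk, chunksize);
}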

Modified: head/contrib/jemalloc/include/jemalloc/internal/chunk.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/chunk.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/chunk.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -58,15 +58,16 @@ void	chunk_deregister(const void *chunk,
 void	*chunk_alloc_base(size_t size);
 void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit, bool dalloc_node);
+    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
 void	*chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+    size_t *sn, bool *zero, bool *commit);
 void	chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
-void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
     bool committed);
+void	chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
+    bool zeroed, bool committed);
 bool	chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);

Modified: head/contrib/jemalloc/include/jemalloc/internal/extent.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/extent.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/extent.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -19,6 +19,20 @@ struct extent_node_s {
 	size_t			en_size;
 
 	/*
+	 * Serial number (potentially non-unique).
+	 *
+	 * In principle serial numbers can wrap around on 32-bit systems if
+	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+	 * back on address comparison for equal serial numbers, stable (if
+	 * imperfect) ordering is maintained.
+	 *
+	 * Serial numbers may not be unique even in the absence of wrap-around,
+	 * e.g. when splitting an extent and assigning the same serial number to
+	 * both resulting adjacent extents.
+	 */
+	size_t			en_sn;
+
+	/*
 	 * The zeroed flag is used by chunk recycling code to track whether
 	 * memory is zero-filled.
 	 */
@@ -45,8 +59,8 @@ struct extent_node_s {
 	qr(extent_node_t)	cc_link;
 
 	union {
-		/* Linkage for the size/address-ordered tree. */
-		rb_node(extent_node_t)	szad_link;
+		/* Linkage for the size/sn/address-ordered tree. */
+		rb_node(extent_node_t)	szsnad_link;
 
 		/* Linkage for arena's achunks, huge, and node_cache lists. */
 		ql_elm(extent_node_t)	ql_link;
@@ -61,7 +75,7 @@ typedef rb_tree(extent_node_t) extent_tr
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 
@@ -73,6 +87,7 @@ rb_proto(, extent_tree_ad_, extent_tree_
 arena_t	*extent_node_arena_get(const extent_node_t *node);
 void	*extent_node_addr_get(const extent_node_t *node);
 size_t	extent_node_size_get(const extent_node_t *node);
+size_t	extent_node_sn_get(const extent_node_t *node);
 bool	extent_node_zeroed_get(const extent_node_t *node);
 bool	extent_node_committed_get(const extent_node_t *node);
 bool	extent_node_achunk_get(const extent_node_t *node);
@@ -80,12 +95,13 @@ prof_tctx_t	*extent_node_prof_tctx_get(c
 void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void	extent_node_addr_set(extent_node_t *node, void *addr);
 void	extent_node_size_set(extent_node_t *node, size_t size);
+void	extent_node_sn_set(extent_node_t *node, size_t sn);
 void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void	extent_node_committed_set(extent_node_t *node, bool committed);
 void	extent_node_achunk_set(extent_node_t *node, bool achunk);
 void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, bool zeroed, bool committed);
+    size_t size, size_t sn, bool zeroed, bool committed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -114,6 +130,13 @@ extent_node_size_get(const extent_node_t
 	return (node->en_size);
 }
 
+JEMALLOC_INLINE size_t
+extent_node_sn_get(const extent_node_t *node)
+{
+
+	return (node->en_sn);
+}
+
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
@@ -165,6 +188,13 @@ extent_node_size_set(extent_node_t *node
 }
 
 JEMALLOC_INLINE void
+extent_node_sn_set(extent_node_t *node, size_t sn)
+{
+
+	node->en_sn = sn;
+}
+
+JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
 
@@ -194,12 +224,13 @@ extent_node_prof_tctx_set(extent_node_t 
 
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    bool zeroed, bool committed)
+    size_t sn, bool zeroed, bool committed)
 {
 
 	extent_node_arena_set(node, arena);
 	extent_node_addr_set(node, addr);
 	extent_node_size_set(node, size);
+	extent_node_sn_set(node, sn);
 	extent_node_zeroed_set(node, zeroed);
 	extent_node_committed_set(node, committed);
 	extent_node_achunk_set(node, false);

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -334,7 +334,7 @@ typedef unsigned szind_t;
 
 /* Return the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2BASE(a, alignment)				\
-	((void *)((uintptr_t)(a) & (-(alignment))))
+	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
 
 /* Return the offset between a and the nearest aligned address at or below a. */
 #define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
@@ -342,7 +342,7 @@ typedef unsigned szind_t;
 
 /* Return the smallest alignment multiple that is >= s. */
 #define	ALIGNMENT_CEILING(s, alignment)					\
-	(((s) + (alignment - 1)) & (-(alignment)))
+	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
 
 /* Declare a variable-length array. */
 #if __STDC_VERSION__ < 199901L

Modified: head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -57,11 +57,6 @@
 #define JEMALLOC_HAVE_BUILTIN_CLZ 
 
 /*
- * Defined if madvise(2) is available.
- */
-#define JEMALLOC_HAVE_MADVISE 
-
-/*
  * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
  */
 /* #undef JEMALLOC_OS_UNFAIR_LOCK */
@@ -72,8 +67,8 @@
  */
 /* #undef JEMALLOC_OSSPIN */
 
-/* Defined if syscall(2) is available. */
-#define JEMALLOC_HAVE_SYSCALL 
+/* Defined if syscall(2) is usable. */
+#define JEMALLOC_USE_SYSCALL 
 
 /*
  * Defined if secure_getenv(3) is available.
@@ -85,6 +80,9 @@
  */
 #define JEMALLOC_HAVE_ISSETUGID 
 
+/* Defined if pthread_atfork(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_ATFORK 
+
 /*
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
@@ -253,18 +251,26 @@
 #define JEMALLOC_SYSCTL_VM_OVERCOMMIT 
 /* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
 
+/* Defined if madvise(2) is available. */
+#define JEMALLOC_HAVE_MADVISE 
+
 /*
  * Methods for purging unused pages differ between operating systems.
  *
- *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
- *                                 such that new pages will be demand-zeroed if
- *                                 the address region is later touched.
- *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
- *                             unused, such that they will be discarded rather
- *                             than swapped out.
+ *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ *                             will be discarded rather than swapped out.
+ *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
+ *                                 new pages will be demand-zeroed if the
+ *                                 address region is later touched.
  */
-/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
 #define JEMALLOC_PURGE_MADVISE_FREE 
+#define JEMALLOC_PURGE_MADVISE_DONTNEED 
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+/* #undef JEMALLOC_THP */
 
 /* Define if operating system has alloca.h header. */
 /* #undef JEMALLOC_HAS_ALLOCA_H */
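
With both purge macros now defined on FreeBSD, the comment's new ordering
(MADV_FREE first) reflects a preference: use MADV_FREE where available and
fall back to MADV_DONTNEED.  The corresponding src/pages.c change is beyond
the truncation point, so this is only a sketch of the selection logic; the
real pages_purge() also reports whether pages may remain unzeroed, which the
sketch ignores:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static bool
pages_purge_sketch(void *addr, size_t size)
{
#if defined(JEMALLOC_PURGE_MADVISE_FREE)
	/* Pages are discarded lazily rather than swapped out. */
	return (madvise(addr, size, MADV_FREE) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
	/* Pages are dropped immediately; later touches demand-zero. */
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
	(void)addr; (void)size;
	return (true);	/* Purging unsupported. */
#endif
}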

Modified: head/contrib/jemalloc/include/jemalloc/internal/pages.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/pages.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/pages.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -16,6 +16,8 @@ void	*pages_trim(void *addr, size_t allo
 bool	pages_commit(void *addr, size_t size);
 bool	pages_decommit(void *addr, size_t size);
 bool	pages_purge(void *addr, size_t size);
+bool	pages_huge(void *addr, size_t size);
+bool	pages_nohuge(void *addr, size_t size);
 void	pages_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
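
pages_huge() and pages_nohuge() map naturally onto the MADV_[NO]HUGEPAGE
advice described in jemalloc_internal_defs.h.  Their definitions live in
src/pages.c past the truncation point, so the following is a hedged sketch
(returning true on failure, matching the apparent convention of
pages_commit() and friends), not the committed code:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static bool
pages_huge_sketch(void *addr, size_t size)
{
#ifdef JEMALLOC_THP
	/* Allow the kernel to back [addr, addr+size) with huge pages. */
	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
	(void)addr; (void)size;
	return (true);	/* No explicit THP controls on this platform. */
#endif
}

static bool
pages_nohuge_sketch(void *addr, size_t size)
{
#ifdef JEMALLOC_THP
	/* Forbid huge pages, e.g. for partially purged chunks. */
	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
	(void)addr; (void)size;
	return (true);
#endif
}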

Modified: head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/private_namespace.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -36,6 +36,7 @@
 #define	arena_decay_time_set JEMALLOC_N(arena_decay_time_set)
 #define	arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
 #define	arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
+#define	arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
 #define	arena_get JEMALLOC_N(arena_get)
 #define	arena_ichoose JEMALLOC_N(arena_ichoose)
 #define	arena_init JEMALLOC_N(arena_init)
@@ -218,6 +219,8 @@
 #define	extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set)
 #define	extent_node_size_get JEMALLOC_N(extent_node_size_get)
 #define	extent_node_size_set JEMALLOC_N(extent_node_size_set)
+#define	extent_node_sn_get JEMALLOC_N(extent_node_sn_get)
+#define	extent_node_sn_set JEMALLOC_N(extent_node_sn_set)
 #define	extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
 #define	extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
 #define	extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
@@ -239,25 +242,25 @@
 #define	extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
 #define	extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
 #define	extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
-#define	extent_tree_szad_destroy JEMALLOC_N(extent_tree_szad_destroy)
-#define	extent_tree_szad_destroy_recurse JEMALLOC_N(extent_tree_szad_destroy_recurse)
-#define	extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
-#define	extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
-#define	extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
-#define	extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
-#define	extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
-#define	extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
-#define	extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
-#define	extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
-#define	extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
-#define	extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
-#define	extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
-#define	extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
-#define	extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
-#define	extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
-#define	extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
-#define	extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
-#define	extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define	extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy)
+#define	extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse)
+#define	extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty)
+#define	extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first)
+#define	extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert)
+#define	extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter)
+#define	extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse)
+#define	extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start)
+#define	extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last)
+#define	extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new)
+#define	extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next)
+#define	extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch)
+#define	extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev)
+#define	extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch)
+#define	extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove)
+#define	extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter)
+#define	extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse)
+#define	extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start)
+#define	extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search)
 #define	ffs_llu JEMALLOC_N(ffs_llu)
 #define	ffs_lu JEMALLOC_N(ffs_lu)
 #define	ffs_u JEMALLOC_N(ffs_u)
@@ -393,7 +396,9 @@
 #define	pages_boot JEMALLOC_N(pages_boot)
 #define	pages_commit JEMALLOC_N(pages_commit)
 #define	pages_decommit JEMALLOC_N(pages_decommit)
+#define	pages_huge JEMALLOC_N(pages_huge)
 #define	pages_map JEMALLOC_N(pages_map)
+#define	pages_nohuge JEMALLOC_N(pages_nohuge)
 #define	pages_purge JEMALLOC_N(pages_purge)
 #define	pages_trim JEMALLOC_N(pages_trim)
 #define	pages_unmap JEMALLOC_N(pages_unmap)

Modified: head/contrib/jemalloc/include/jemalloc/internal/stats.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/stats.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/stats.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -175,25 +175,21 @@ stats_cactive_get(void)
 JEMALLOC_INLINE void
 stats_cactive_add(size_t size)
 {
-	UNUSED size_t cactive;
 
 	assert(size > 0);
 	assert((size & chunksize_mask) == 0);
 
-	cactive = atomic_add_z(&stats_cactive, size);
-	assert(cactive - size < cactive);
+	atomic_add_z(&stats_cactive, size);
 }
 
 JEMALLOC_INLINE void
 stats_cactive_sub(size_t size)
 {
-	UNUSED size_t cactive;
 
 	assert(size > 0);
 	assert((size & chunksize_mask) == 0);
 
-	cactive = atomic_sub_z(&stats_cactive, size);
-	assert(cactive + size > cactive);
+	atomic_sub_z(&stats_cactive, size);
 }
 #endif
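
On why these assertions were "overly restrictive": cactive is updated from
many threads, and this diff elsewhere shows optimistic stats updates that
are reverted on failure, so the running sum can transiently be inconsistent
and even wrap below zero.  Once that happens, the wraparound checks on the
post-update value fire without any real accounting bug.  A compilable
illustration of the failing condition (my reading of the fix, assuming
atomic_add_z() returns the post-add value):

#include <assert.h>
#include <stddef.h>

int
main(void)
{
	size_t cactive = (size_t)-4096;	/* sum transiently "negative" */
	size_t size = 8192;

	cactive += size;		/* stand-in for atomic_add_z() */
	/* The removed check, assert(cactive - size < cactive), would
	 * abort here: cactive wrapped forward to 4096, and 4096 - 8192
	 * wraps back to a huge value. */
	assert(!(cactive - size < cactive));
	return (0);
}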
 

Modified: head/contrib/jemalloc/include/jemalloc/internal/util.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/util.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/util.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -41,8 +41,12 @@
 #define	MALLOC_PRINTF_BUFSIZE	4096
 
 /* Junk fill patterns. */
-#define	JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
-#define	JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
+#ifndef JEMALLOC_ALLOC_JUNK
+#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
+#endif
 
 /*
  * Wrap a cpp argument that contains commas such that it isn't broken up into
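
The new #ifndef guards make the junk-fill bytes overridable at build time.
Hypothetically, an embedder could predefine them (the values below are
illustrative, not anything FreeBSD actually sets):

/*
 * Define before util.h is included, e.g. via -D flags in CFLAGS; the
 * #ifndef guards then keep these instead of the 0xa5/0x5a defaults.
 */
#define	JEMALLOC_ALLOC_JUNK	((uint8_t)0xd0)
#define	JEMALLOC_FREE_JUNK	((uint8_t)0x0d)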

Modified: head/contrib/jemalloc/include/jemalloc/internal/valgrind.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/internal/valgrind.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/internal/valgrind.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -36,13 +36,25 @@
 		    zero);						\
 	}								\
 } while (0)
-#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {								\
+#define	JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr)		\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr)		\
+    ((ptr) != (old_ptr))
+#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr)			\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr)			\
+    (ptr == NULL)
+#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr)		\
+    (false)
+#define	JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr)		\
+    (old_ptr == NULL)
+#define	JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null,	\
+    old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do {		\
 	if (unlikely(in_valgrind)) {					\
 		size_t rzsize = p2rz(tsdn, ptr);			\
 									\
-		if (!maybe_moved || ptr == old_ptr) {			\
+		if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr,	\
+		    old_ptr)) {						\
 			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
 			    usize, rzsize);				\
 			if (zero && old_usize < usize) {		\
@@ -51,11 +63,13 @@
 				    old_usize), usize - old_usize);	\
 			}						\
 		} else {						\
-			if (!old_ptr_maybe_null || old_ptr != NULL) {	\
+			if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_##	\
+			    old_ptr_null(old_ptr)) {			\
 				valgrind_freelike_block(old_ptr,	\
 				    old_rzsize);			\
 			}						\
-			if (!ptr_maybe_null || ptr != NULL) {		\
+			if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_##	\
+			    ptr_null(ptr)) {				\
 				size_t copy_size = (old_usize < usize)	\
 				    ?  old_usize : usize;		\
 				size_t tail_size = usize - copy_size;	\
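
The rewritten macro replaces the old maybe_moved/ptr_maybe_null boolean
parameters with token-pasted no/maybe suffixes, so the "cannot move" and
"cannot be NULL" cases reduce to the constant (false) and the compiler can
drop the dead branches entirely.  The same dispatch in miniature
(illustrative, not jemalloc's code):

#include <stdbool.h>
#include <stdio.h>

#define	MOVED_no(ptr, old_ptr)		(false)
#define	MOVED_maybe(ptr, old_ptr)	((ptr) != (old_ptr))
#define	REALLOC_MOVED(moved, ptr, old_ptr)				\
	MOVED_##moved(ptr, old_ptr)

int
main(void)
{
	int a, b;

	/* Expands to (false): the moved branch is compiled away. */
	printf("%d\n", REALLOC_MOVED(no, &a, &a));
	/* Expands to a real pointer comparison. */
	printf("%d\n", REALLOC_MOVED(maybe, &a, &b));
	return (0);
}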

Modified: head/contrib/jemalloc/include/jemalloc/jemalloc.h
==============================================================================
--- head/contrib/jemalloc/include/jemalloc/jemalloc.h	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/include/jemalloc/jemalloc.h	Sun Dec  4 21:13:26 2016	(r309540)
@@ -87,12 +87,12 @@ extern "C" {
 #include <limits.h>
 #include <strings.h>
 
-#define	JEMALLOC_VERSION "4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2"
+#define	JEMALLOC_VERSION "4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc"
 #define	JEMALLOC_VERSION_MAJOR 4
-#define	JEMALLOC_VERSION_MINOR 3
-#define	JEMALLOC_VERSION_BUGFIX 1
+#define	JEMALLOC_VERSION_MINOR 4
+#define	JEMALLOC_VERSION_BUGFIX 0
 #define	JEMALLOC_VERSION_NREV 0
-#define	JEMALLOC_VERSION_GID "0110fa8451af905affd77c3bea0d545fee2251b2"
+#define	JEMALLOC_VERSION_GID "f1f76357313e7dcad7262f17a48ff0a2e005fcdc"
 
 #  define MALLOCX_LG_ALIGN(la)	((int)(la))
 #  if LG_SIZEOF_PTR == 2

Modified: head/contrib/jemalloc/src/arena.c
==============================================================================
--- head/contrib/jemalloc/src/arena.c	Sun Dec  4 20:44:58 2016	(r309539)
+++ head/contrib/jemalloc/src/arena.c	Sun Dec  4 21:13:26 2016	(r309540)
@@ -38,8 +38,8 @@ static void	arena_run_dalloc(tsdn_t *tsd
     bool dirty, bool cleaned, bool decommitted);
 static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
     arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run, arena_bin_t *bin);
+static void	arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+    arena_bin_t *bin);
 
 /******************************************************************************/
 
@@ -55,8 +55,31 @@ arena_miscelm_size_get(const arena_chunk
 	return (arena_mapbits_size_decode(mapbits));
 }
 
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+	arena_chunk_t *chunk;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+	return (&chunk->node);
+}
+
+JEMALLOC_INLINE_C int
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+	size_t a_sn, b_sn;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+	return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
 JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
+arena_ad_comp(const arena_chunk_map_misc_t *a,
     const arena_chunk_map_misc_t *b)
 {
 	uintptr_t a_miscelm = (uintptr_t)a;
@@ -68,9 +91,26 @@ arena_run_addr_comp(const arena_chunk_ma
 	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
 }
 
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+    const arena_chunk_map_misc_t *b)
+{
+	int ret;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	ret = arena_sn_comp(a, b);
+	if (ret != 0)
+		return (ret);
+
+	ret = arena_ad_comp(a, b);
+	return (ret);
+}
+
 /* Generate pairing heap functions. */
 ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
-    ph_link, arena_run_addr_comp)
+    ph_link, arena_snad_comp)
 
 #ifdef JEMALLOC_JET
 #undef run_quantize_floor
@@ -529,7 +569,7 @@ arena_chunk_init_spare(arena_t *arena)
 
 static bool
 arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    bool zero)
+    size_t sn, bool zero)
 {
 
 	/*
@@ -538,7 +578,7 @@ arena_chunk_register(tsdn_t *tsdn, arena
 	 * of runs is tracked individually, and upon chunk deallocation the
 	 * entire chunk is in a consistent commit state.
 	 */
-	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
 	extent_node_achunk_set(&chunk->node, true);
 	return (chunk_register(tsdn, chunk, &chunk->node));
 }
@@ -548,28 +588,30 @@ arena_chunk_alloc_internal_hard(tsdn_t *
     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
 {
 	arena_chunk_t *chunk;
+	size_t sn;
 
 	malloc_mutex_unlock(tsdn, &arena->lock);
 
 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
-	    NULL, chunksize, chunksize, zero, commit);
+	    NULL, chunksize, chunksize, &sn, zero, commit);
 	if (chunk != NULL && !*commit) {
 		/* Commit header. */
 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
 		    LG_PAGE, arena->ind)) {
 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-			    (void *)chunk, chunksize, *zero, *commit);
+			    (void *)chunk, chunksize, sn, *zero, *commit);
 			chunk = NULL;
 		}
 	}
-	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+	    *zero)) {
 		if (!*commit) {
 			/* Undo commit of header. */
 			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
 			    LG_PAGE, arena->ind);
 		}
 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-		    chunksize, *zero, *commit);
+		    chunksize, sn, *zero, *commit);
 		chunk = NULL;
 	}
 
@@ -583,13 +625,14 @@ arena_chunk_alloc_internal(tsdn_t *tsdn,
 {
 	arena_chunk_t *chunk;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+	size_t sn;
 
 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
-	    chunksize, zero, commit, true);
+	    chunksize, &sn, zero, commit, true);
 	if (chunk != NULL) {
-		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+		if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
 			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
-			    chunksize, true);
+			    chunksize, sn, true);
 			return (NULL);
 		}
 	}
@@ -621,6 +664,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, aren
 	if (chunk == NULL)
 		return (NULL);
 
+	chunk->hugepage = true;
+
 	/*
 	 * Initialize the map to contain one maximal free untouched run.  Mark
 	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
@@ -684,11 +729,14 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t 
 static void
 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 {
+	size_t sn, hugepage;
 	bool committed;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 	chunk_deregister(chunk, &chunk->node);
 
+	sn = extent_node_sn_get(&chunk->node);
+	hugepage = chunk->hugepage;
 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
 	if (!committed) {
 		/*
@@ -701,9 +749,17 @@ arena_chunk_discard(tsdn_t *tsdn, arena_
 		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
 		    arena->ind);
 	}
+	if (!hugepage) {
+		/*
+		 * Convert chunk back to the default state, so that all
+		 * subsequent chunk allocations start out with chunks that can
+		 * be backed by transparent huge pages.
+		 */
+		pages_huge(chunk, chunksize);
+	}
 
 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
-	    committed);
+	    sn, committed);
 
 	if (config_stats) {
 		arena->stats.mapped -= chunksize;
@@ -859,14 +915,14 @@ arena_node_dalloc(tsdn_t *tsdn, arena_t 
 
 static void *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
-    size_t csize)
+    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+    bool *zero, size_t csize)
 {
 	void *ret;
 	bool commit = true;
 
 	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
-	    alignment, zero, &commit);
+	    alignment, sn, zero, &commit);
 	if (ret == NULL) {
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(tsdn, &arena->lock);
@@ -883,7 +939,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn
 
 void *
 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero)
+    size_t alignment, size_t *sn, bool *zero)
 {
 	void *ret;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
@@ -900,18 +956,19 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, are
 	arena_nactive_add(arena, usize >> LG_PAGE);
 
 	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
-	    alignment, zero, &commit, true);
+	    alignment, sn, zero, &commit, true);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (ret == NULL) {
 		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
-		    usize, alignment, zero, csize);
+		    usize, alignment, sn, zero, csize);
 	}
 
 	return (ret);
 }
 
 void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+    size_t sn)
 {
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t csize;
@@ -924,7 +981,7 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, ar
 	}
 	arena_nactive_sub(arena, usize >> LG_PAGE);
 
-	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
@@ -948,7 +1005,7 @@ arena_chunk_ralloc_huge_similar(tsdn_t *
 
 void
 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize)
+    size_t oldsize, size_t usize, size_t sn)
 {
 	size_t udiff = oldsize - usize;
 	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@@ -967,7 +1024,7 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *t
 		    CHUNK_CEILING(usize));
 
 		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-		    true);
+		    sn, true);
 	}
 	malloc_mutex_unlock(tsdn, &arena->lock);
 }
@@ -975,13 +1032,13 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *t
 static bool
 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
-    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+    size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
 {
 	bool err;
 	bool commit = true;
 
 	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, &commit) == NULL);
+	    chunksize, sn, zero, &commit) == NULL);
 	if (err) {
 		/* Revert optimistic stats updates. */
 		malloc_mutex_lock(tsdn, &arena->lock);
@@ -995,7 +1052,7 @@ arena_chunk_ralloc_huge_expand_hard(tsdn
 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-		    *zero, true);
+		    *sn, *zero, true);
 		err = true;
 	}
 	return (err);
@@ -1010,6 +1067,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *t
 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 	size_t udiff = usize - oldsize;
 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+	size_t sn;
 	bool commit = true;
 
 	malloc_mutex_lock(tsdn, &arena->lock);
@@ -1022,16 +1080,16 @@ arena_chunk_ralloc_huge_expand(tsdn_t *t
 	arena_nactive_add(arena, udiff >> LG_PAGE);
 
 	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, &commit, true) == NULL);
+	    chunksize, &sn, zero, &commit, true) == NULL);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (err) {
 		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
-		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
-		    cdiff);
+		    &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+		    udiff, cdiff);
 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
 	    cdiff, true, arena->ind)) {
 		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-		    *zero, true);
+		    sn, *zero, true);
 		err = true;
 	}
 
@@ -1519,6 +1577,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t 
 
 		if (rdelm == &chunkselm->rd) {
 			extent_node_t *chunkselm_next;
+			size_t sn;
 			bool zero, commit;
 			UNUSED void *chunk;
 
@@ -1536,8 +1595,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t 
 			commit = false;
 			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
 			    extent_node_addr_get(chunkselm),
-			    extent_node_size_get(chunkselm), chunksize, &zero,
-			    &commit, false);
+			    extent_node_size_get(chunkselm), chunksize, &sn,
+			    &zero, &commit, false);
 			assert(chunk == extent_node_addr_get(chunkselm));
 			assert(zero == extent_node_zeroed_get(chunkselm));
 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -1634,6 +1693,17 @@ arena_purge_stashed(tsdn_t *tsdn, arena_
 			run_size = arena_mapbits_large_size_get(chunk, pageind);
 			npages = run_size >> LG_PAGE;
 
+			/*
+			 * If this is the first run purged within chunk, mark
+			 * the chunk as non-huge.  This will prevent all use of

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


