Date:      Tue, 20 Sep 2005 20:54:32 GMT
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 84000 for review
Message-ID:  <200509202054.j8KKsWQP098057@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=84000

Change 84000 by peter@peter_daintree on 2005/09/20 20:54:07

	checkpoint

Affected files ...

.. //depot/projects/hammer/lib/libc/stdlib/malloc.c#18 edit

Differences ...

==== //depot/projects/hammer/lib/libc/stdlib/malloc.c#18 (text+ko) ====

@@ -21,6 +21,14 @@
 #undef MALLOC_EXTRA_SANITY
 
 /*
+ * If defined, once sbrk(3) space is exhausted, spill over into mmap(2) space
+ * backed by MAP_ANON.  Each mmap consumes a VM pager object, so rather than
+ * mapping each request separately we allocate large slabs via mmap and
+ * sub-allocate from them.  The value below is the slab size in bytes.
+ */
+#define	MALLOC_MMAP	(32 * 1024 * 1024)
+
+/*
  * What to use for Junk.  This is the byte value we use to fill with
  * when the 'J' option is enabled.
  */
@@ -320,6 +328,61 @@
     _malloc_message(_getprogname(), malloc_func, " warning: ", p);
 }
 
+#ifdef MALLOC_MMAP
+static void *mmap_base;
+static void *mmap_end;
+static void *mmap_brk;
+
+
+static void *
+mmap_chunk(size_t pages)
+{
+	void *mapbase;
+	size_t size;
+
+	size = MALLOC_MMAP;	/* default slab size for heap extension */
+	if (pages * malloc_pagesize > size)
+		size = pages * malloc_pagesize;
+	mapbase = MMAP(size);
+	if (mapbase == (void *)-1)
+		return (NULL);
+	/* Release any unused tail of the previous slab */
+	if (mmap_brk < mmap_end)
+		munmap(mmap_brk, mmap_end - mmap_brk);
+	mmap_base = mapbase;
+	mmap_end = mapbase + size;
+	mmap_brk = mapbase;
+	return (mapbase);
+}
+
+static void *
+mmap_pages(size_t pages)
+{
+	caddr_t result, tail;
+	size_t size;
+	void *ret;
+
+	size = pages * malloc_pagesize;
+	/* Grab a new slab as needed */
+	if (mmap_base == NULL || (mmap_brk + size) > mmap_end) {
+		ret = mmap_chunk(pages);
+		if (ret == NULL)
+			return (NULL);
+	}
+	/* Now suballoc */
+	result = mmap_brk;
+	tail = result + size;
+	if (tail < result)
+		return (NULL);
+	mmap_brk = tail;
+	/* Update accounting and page directory index */
+	last_index = ptr2index(tail) - 1;
+	if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
+		return (NULL);
+	return (result);
+}
+#endif
+
 /*
  * Allocate a number of pages from the OS
  */
@@ -334,6 +397,16 @@
 	return (NULL);
 
     if (brk(tail)) {
+#ifdef MALLOC_MMAP
+	/*
+	 * If this request does not fit on the heap, fall back to mmap.  Note
+	 * that this particular allocation may simply be too large for the
+	 * remaining heap; smaller future requests may still fit, and the heap
+	 * may shrink again if something is freed, so keep trying brk() first.
+	 */
+	if (errno == ENOMEM)
+	    return (mmap_pages(pages));
+#endif
 #ifdef MALLOC_EXTRA_SANITY
 	wrterror("(ES): map_pages fails\n");
 #endif /* MALLOC_EXTRA_SANITY */
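
For anyone who wants to experiment with the idea outside of libc, below is a
minimal standalone sketch of the same slab-over-mmap scheme: map a large
MAP_ANON region up front, bump-allocate page-multiple pieces out of it, and
map a fresh slab (releasing the unused tail of the old one) once the current
slab is exhausted.  The slab_* names, SLAB_SIZE and the main() driver are
illustrative only and not part of the patch, and the sketch deliberately
omits the page-directory bookkeeping (ptr2index()/extend_pgdir()) that the
real malloc.c code has to do.

#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define	SLAB_SIZE	(32UL * 1024 * 1024)	/* mirrors MALLOC_MMAP */

static char *slab_base;		/* start of the current slab */
static char *slab_brk;		/* first unused byte in the current slab */
static char *slab_end;		/* one past the end of the current slab */
static size_t pagesize;

/*
 * Map a new slab of at least 'size' bytes and release the unused tail of
 * the previous one, mirroring mmap_chunk() above.
 */
static void *
slab_refill(size_t size)
{
	size_t len;
	char *p;

	len = (size > SLAB_SIZE) ? size : SLAB_SIZE;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (p == MAP_FAILED)
		return (NULL);
	if (slab_brk != NULL && slab_brk < slab_end)
		munmap(slab_brk, (size_t)(slab_end - slab_brk));
	slab_base = slab_brk = p;
	slab_end = p + len;
	return (p);
}

/*
 * Hand out 'pages' pages from the current slab, grabbing a new slab when
 * the current one runs out, mirroring mmap_pages() above.
 */
static void *
slab_alloc_pages(size_t pages)
{
	size_t size;
	char *result;

	if (pagesize == 0)
		pagesize = (size_t)sysconf(_SC_PAGESIZE);
	size = pages * pagesize;
	if (slab_base == NULL || slab_brk + size > slab_end) {
		if (slab_refill(size) == NULL)
			return (NULL);
	}
	result = slab_brk;
	slab_brk += size;
	return (result);
}

int
main(void)
{
	void *a = slab_alloc_pages(4);	/* carved from the same slab */
	void *b = slab_alloc_pages(1);

	if (a == NULL || b == NULL)
		return (1);
	memset(a, 0x5a, 4 * pagesize);	/* the memory is immediately usable */
	printf("slab %p: a=%p b=%p\n", (void *)slab_base, a, b);
	return (0);
}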


