Date:      Sat, 9 Feb 2008 04:23:14 GMT
From:      John Birrell <jb@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 135085 for review
Message-ID:  <200802090423.m194NEoi051872@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=135085

Change 135085 by jb@jb_freebsd1 on 2008/02/09 04:23:04

	IFC (Integrate From Current)

Affected files ...

.. //depot/projects/dtrace/doc/ru_RU.KOI8-R/man/man1/sh.1#2 integrate
.. //depot/projects/dtrace/ports/LEGAL#33 integrate
.. //depot/projects/dtrace/ports/Mk/bsd.sites.mk#22 integrate
.. //depot/projects/dtrace/ports/UPDATING#52 integrate
.. //depot/projects/dtrace/src/UPDATING#24 integrate
.. //depot/projects/dtrace/src/bin/date/date.c#4 integrate
.. //depot/projects/dtrace/src/etc/namedb/named.root#5 integrate
.. //depot/projects/dtrace/src/lib/libc/stdlib/malloc.c#18 integrate
.. //depot/projects/dtrace/src/lib/libfetch/common.c#6 integrate
.. //depot/projects/dtrace/src/lib/libfetch/fetch.3#6 integrate
.. //depot/projects/dtrace/src/lib/libfetch/ftp.c#8 integrate
.. //depot/projects/dtrace/src/lib/libfetch/http.c#8 integrate
.. //depot/projects/dtrace/src/lib/msun/ld128/s_exp2l.c#2 integrate
.. //depot/projects/dtrace/src/lib/msun/ld80/s_exp2l.c#2 integrate
.. //depot/projects/dtrace/src/lib/msun/src/e_exp.c#4 integrate
.. //depot/projects/dtrace/src/lib/msun/src/e_expf.c#6 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_exp2.c#6 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_exp2f.c#7 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_expm1.c#4 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_expm1f.c#4 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_logb.c#5 integrate
.. //depot/projects/dtrace/src/lib/msun/src/s_truncl.c#4 integrate
.. //depot/projects/dtrace/src/sbin/ipfw/ipfw.8#17 integrate
.. //depot/projects/dtrace/src/sbin/md5/md5.c#4 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_psdev.c#3 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_subr.c#2 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_venus.c#2 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_vfsops.c#5 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_vnops.c#6 integrate
.. //depot/projects/dtrace/src/sys/fs/coda/coda_vnops.h#2 integrate
.. //depot/projects/dtrace/src/sys/fs/nullfs/null_vfsops.c#9 integrate
.. //depot/projects/dtrace/src/sys/i386/conf/YABBA#2 edit
.. //depot/projects/dtrace/src/sys/kern/kern_lock.c#18 integrate
.. //depot/projects/dtrace/src/sys/kern/kern_rwlock.c#14 integrate
.. //depot/projects/dtrace/src/sys/kern/subr_sleepqueue.c#11 integrate
.. //depot/projects/dtrace/src/sys/kern/subr_turnstile.c#12 integrate
.. //depot/projects/dtrace/src/sys/kern/uipc_shm.c#3 integrate
.. //depot/projects/dtrace/src/sys/kern/vfs_subr.c#30 integrate
.. //depot/projects/dtrace/src/sys/netgraph/netflow/netflow.c#8 integrate
.. //depot/projects/dtrace/src/sys/netinet/in_rmx.c#7 integrate
.. //depot/projects/dtrace/src/sys/netinet/ip_carp.c#11 integrate
.. //depot/projects/dtrace/src/sys/nfs4client/nfs4_vfsops.c#10 integrate
.. //depot/projects/dtrace/src/sys/nfsclient/nfs_vfsops.c#15 integrate
.. //depot/projects/dtrace/src/sys/sys/param.h#38 integrate
.. //depot/projects/dtrace/src/sys/sys/proc.h#34 integrate
.. //depot/projects/dtrace/src/usr.bin/ministat/ministat.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/netstat/netstat.h#7 integrate
.. //depot/projects/dtrace/src/usr.bin/netstat/route.c#10 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat_basic.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat_files.c#4 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat_kstack.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat_threads.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/procstat/procstat_vm.c#2 integrate
.. //depot/projects/dtrace/src/usr.bin/uniq/uniq.c#5 integrate
.. //depot/projects/dtrace/src/usr.sbin/bootparamd/Makefile#4 integrate
.. //depot/projects/dtrace/src/usr.sbin/bootparamd/Makefile.inc#4 integrate
.. //depot/projects/dtrace/src/usr.sbin/pkg_install/add/pkg_add.1#7 integrate

Differences ...

==== //depot/projects/dtrace/doc/ru_RU.KOI8-R/man/man1/sh.1#2 (text+ko) ====

@@ -31,8 +31,8 @@
 .\"
 .\"	from: @(#)sh.1	8.6 (Berkeley) 5/4/95
 .\" %FreeBSD: src/bin/sh/sh.1,v 1.125.2.1 2007/12/05 14:29:07 ru Exp %
-.\" $FreeBSDru: frdp/doc/ru_RU.KOI8-R/man/man1/sh.1,v 1.2 2007/12/05 14:31:10 ru Exp $
-.\" $FreeBSD: doc/ru_RU.KOI8-R/man/man1/sh.1,v 1.2 2007/12/05 14:32:41 ru Exp $
+.\" $FreeBSDru: frdp/doc/ru_RU.KOI8-R/man/man1/sh.1,v 1.3 2008/02/08 13:05:39 ru Exp $
+.\" $FreeBSD: doc/ru_RU.KOI8-R/man/man1/sh.1,v 1.3 2008/02/08 13:52:03 ru Exp $
 .\"
 .Dd 7 октября 2006
 .Dt SH 1
@@ -222,7 +222,7 @@
 .Dv EOF
 на входе.
 .It Fl i Li interactive
-Насильно установить интерактивный режим работы оболочки.
+Принудительно установить интерактивный режим работы оболочки.
 .It Fl m Li monitor
 Включить управление заданиями (включается автоматически в
 интерактивном режиме).
@@ -247,7 +247,7 @@
 не равен реальному идентификатору пользователя или группы.
 Выключение этого режима устанавливает эффективные идентификаторы
 пользователя и группы в реальные.
-Когда этот режим включен в интерактивной оболочке, то после файла
+Когда этот режим включен в интерактивной оболочке, после файла
 .Pa /etc/profile
 вместо
 .Pa ~/.profile
@@ -425,7 +425,7 @@
 создавать функции с аргументами.
 Они также могут быть
 использованы для создания лексически невразумительного кода.
-Использование псевдонимов для этих целей не рекомендуется.
+Такое их использование не рекомендуется.
 .Pp
 Действие псевдонима может быть отменено в командной строке,
 если использовать экранирование внутри или смежно с именем
@@ -443,7 +443,7 @@
 Основное то, что читается строка, и если первое слово в строке
 (или после оператора управления) не является зарезервированным,
 то оболочка распознала простую команду.
-В противном случае команда распознаётся как составная или как
+В противном случае команда распознаётся как сложная или как
 некоторая другая специальная конструкция.
 .Ss Простые команды
 Когда распознаётся простая команда, оболочка предпринимает
@@ -583,7 +583,7 @@
 программа в файловой системе (как описано в следующем разделе).
 Когда выполняется обычная команда, оболочка запускает эту программу,
 передавая ей аргументы и окружение.
-Если программа не является обычным исполняемым файл
+Если программа не является обычным исполняемым файлом
 (т.е.\& если она не начинается с
 .Dq "магического числа" ,
 чьё
@@ -626,7 +626,7 @@
 .Ev PATH
 должен быть список элементов,
 разделённых двоеточиями.
-В каждом элементе указывается
+Каждый элемент задаёт
 имя каталога.
 Текущий каталог
 может быть задан неявно, пустым именем каталога,
@@ -648,10 +648,10 @@
 номер сигнала.
 Номера сигналов определены в заголовочном файле
 .In sys/signal.h .
-.Ss Составные команды
-Составные команды \[em] это комбинации простых команд
+.Ss Сложные команды
+Сложные команды \[em] это комбинации простых команд
 с управляющими операторами или зарезервированными словами, вместе составляющих
-одну большую составную команду.
+одну большую сложную команду.
 В общем, команда бывает одного из следующих типов:
 .Bl -item -offset indent
 .It
@@ -1520,7 +1520,7 @@
 Обрабатывать управляющие последовательности с обратной косой чертой в стиле C.
 Команда
 .Ic echo
-понимает следующие последовательности символов:
+понимает следующие управляющие последовательности:
 .Bl -tag -width indent
 .It \ea
 Сигнал тревоги (позвонить в звонок терминала)
@@ -1758,7 +1758,7 @@
 Выводит идентификаторы процессов в указанном задании.
 Если аргумент
 .Ar задание
-опущен, используется текущая задание.
+опущен, используется текущее задание.
 .It Ic jobs Oo Fl lps Oc Op Ar задание ...
 Выводит информацию об указанных заданиях или всех заданиях,
 если не указан аргумент
@@ -2060,7 +2060,7 @@
 Максимальное количество одновременно выполняющихся процессов
 для этого идентификатора пользователя.
 .It Fl v Ar virtualmem
-Максимальный виртуальной размер процесса в килобайтах.
+Максимальный виртуальный размер процесса в килобайтах.
 .El
 .It Ic umask Oo Fl S Oc Op Ar маска
 Устанавливает маску создания файлов (см.\&

==== //depot/projects/dtrace/ports/LEGAL#33 (text+ko) ====

@@ -1,5 +1,5 @@
 # Creator:  Jordan Hubbard
-# $FreeBSD: ports/LEGAL,v 1.564 2008/01/09 20:12:41 thierry Exp $
+# $FreeBSD: ports/LEGAL,v 1.565 2008/02/09 02:01:05 tabthorpe Exp $
 
    **********************************************************************
    ***                      NOTE TO COMMITTERS                        ***
@@ -690,3 +690,4 @@
 gamess.*/gamess*	science/gamess		No Redistribution
 vst_sdk2_3.zip		audio/ardour		Redistribution of the VST PlugIns SDK is not allowed
 send-*			net-mgmt/send		Not redistributable, license agreement required
+rapid-*			math/rapid		No commercial use

==== //depot/projects/dtrace/ports/Mk/bsd.sites.mk#22 (text+ko) ====

@@ -20,7 +20,7 @@
 #
 # Note: all entries should terminate with a slash.
 #
-# $FreeBSD: ports/Mk/bsd.sites.mk,v 1.435 2008/02/01 08:05:09 linimon Exp $
+# $FreeBSD: ports/Mk/bsd.sites.mk,v 1.436 2008/02/07 16:13:41 shaun Exp $
 #
 
 # Where to put distfiles that don't have any other master site
@@ -1264,7 +1264,20 @@
 # Updated:	2006-06-13
 .if !defined(IGNORE_MASTER_SITE_VIM)
 MASTER_SITE_VIM+= \
-	ftp://ftp.vim.org/pub/vim/unix/  \
+	http://ftp.vim.org/pub/vim/unix/ \
+	http://mirrors.24-7-solutions.net/pub/vim/unix/ \
+	http://ftp.tw.vim.org/pub/vim/unix/ \
+	http://vim.stu.edu.tw/unix/ \
+	http://gd.tuwien.ac.at/pub/vim/unix/ \
+	http://www.etsimo.uniovi.es/pub/vim/unix/ \
+	http://www.pt.vim.org/pub/vim/unix/ \
+	http://www.pangora.org/vim.org/pub/vim/unix/ \
+	http://www.math.technion.ac.il/pub/vim/unix/ \
+	http://vim.fyxm.net/pub/vim/unix/ \
+	http://zloba.ath.cx/pub/vim/unix/ \
+	http://ftp2.uk.vim.org/sites/ftp.vim.org/pub/vim/unix/ \
+	http://vim.mirror.fr/unix/ \
+	ftp://ftp.vim.org/pub/vim/unix/ \
 	ftp://ftp2.us.vim.org/pub/vim/unix/ \
 	ftp://ftp9.us.vim.org/pub/vim/unix/ \
 	ftp://ftp.ca.vim.org/pub/vim/unix/ \
@@ -1284,20 +1297,7 @@
 	ftp://vim.stu.edu.tw/pub/vim/unix/ \
 	ftp://ftp.jp.vim.org/pub/vim/unix/ \
 	ftp://ftp.kr.vim.org/pub/vim/unix/ \
-	ftp://ftp.mirrorservice.org/sites/ftp.vim.org/pub/vim/unix/ \
-	http://ftp.vim.org/pub/vim/unix/ \
-	http://mirrors.24-7-solutions.net/pub/vim/unix/ \
-	http://ftp.tw.vim.org/pub/vim/unix/ \
-	http://vim.stu.edu.tw/unix/ \
-	http://gd.tuwien.ac.at/pub/vim/unix/ \
-	http://www.etsimo.uniovi.es/pub/vim/unix/ \
-	http://www.pt.vim.org/pub/vim/unix/ \
-	http://www.pangora.org/vim.org/pub/vim/unix/ \
-	http://www.math.technion.ac.il/pub/vim/unix/ \
-	http://vim.fyxm.net/pub/vim/unix/ \
-	http://zloba.ath.cx/pub/vim/unix/ \
-	http://ftp2.uk.vim.org/sites/ftp.vim.org/pub/vim/unix/ \
-	http://vim.mirror.fr/unix/
+	ftp://ftp.mirrorservice.org/sites/ftp.vim.org/pub/vim/unix/
 .endif
 
 .if !defined(IGNORE_MASTER_SITE_WINDOWMAKER)

==== //depot/projects/dtrace/ports/UPDATING#52 (text+ko) ====

@@ -6,6 +6,16 @@
 time you update your ports collection, before attempting any port
 upgrades.
 
+20080208:
+  AFFECTS: users of ports-mgmt/portupgrade
+  AUTHOR: sem@FreeBSD.org
+
+  As noted in the 20070301 entry below in this file, you must fill in the
+  ALT_DEPENDS section of pkgtools.conf for portupgrade to work correctly
+  with your alternative dependencies.  This is now a mandatory requirement.
+
+  This was done as a compromise between speed and complexity.
+
 20080203:
   AFFECTS: users of www/xshttpd
   AUTHOR: johans@FreeBSD.org
@@ -6061,4 +6071,4 @@
   2) Update all p5-* modules.
 	portupgrade -f p5-\*
 
-$FreeBSD: ports/UPDATING,v 1.584 2008/02/03 20:46:04 sat Exp $
+$FreeBSD: ports/UPDATING,v 1.586 2008/02/08 13:38:22 sem Exp $

==== //depot/projects/dtrace/src/UPDATING#24 (text+ko) ====

@@ -22,6 +22,10 @@
 	to maximize performance.  (To disable malloc debugging, run
 	ln -s aj /etc/malloc.conf.)
 
+20080208:
+	Belatedly note the addition of m_collapse for compacting
+	mbuf chains.
+
 20080126:
 	The fts(3) structures have been changed to use adequate
 	integer types for their members and so to be able to cope
@@ -969,4 +973,4 @@
 Contact Warner Losh if you have any questions about your use of
 this document.
 
-$FreeBSD: src/UPDATING,v 1.517 2008/01/26 17:09:39 yar Exp $
+$FreeBSD: src/UPDATING,v 1.518 2008/02/08 21:24:58 sam Exp $
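
The 20080208 src/UPDATING note above refers to m_collapse(9), which compacts
an mbuf chain in place down to a requested number of fragments.  A minimal
sketch of the usual driver-side pattern follows; it is illustrative only, not
part of this change, and the names mydrv_compact() and MYDRV_MAXSEGS are
hypothetical.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mbuf.h>

	#define	MYDRV_MAXSEGS	32	/* hypothetical hardware DMA segment limit */

	/*
	 * Compact an outgoing chain so it fits the hardware's segment limit.
	 * m_collapse(9) coalesces adjacent mbufs in place and returns NULL if
	 * it cannot meet the limit; in that case the caller still has to
	 * dispose of (or otherwise handle) the original chain.
	 */
	static struct mbuf *
	mydrv_compact(struct mbuf *m)
	{
		struct mbuf *n;

		n = m_collapse(m, M_DONTWAIT, MYDRV_MAXSEGS);
		if (n == NULL) {
			/* A driver could fall back to m_defrag(9) here instead. */
			m_freem(m);
			return (NULL);
		}
		return (n);
	}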

==== //depot/projects/dtrace/src/bin/date/date.c#4 (text+ko) ====

@@ -40,7 +40,7 @@
 #endif
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/bin/date/date.c,v 1.47 2005/01/10 08:39:21 imp Exp $");
+__FBSDID("$FreeBSD: src/bin/date/date.c,v 1.48 2008/02/07 16:04:24 ru Exp $");
 
 #include <sys/param.h>
 #include <sys/time.h>
@@ -186,8 +186,10 @@
 	const char *dot, *t;
 	int century;
 
+	lt = localtime(&tval);
+	lt->tm_isdst = -1;		/* divine correct DST */
+
 	if (fmt != NULL) {
-		lt = localtime(&tval);
 		t = strptime(p, fmt, lt);
 		if (t == NULL) {
 			fprintf(stderr, "Failed conversion of ``%s''"
@@ -208,8 +210,6 @@
 			badformat();
 		}
 
-		lt = localtime(&tval);
-
 		if (dot != NULL) {			/* .ss */
 			dot++; /* *dot++ = '\0'; */
 			if (strlen(dot) != 2)
@@ -264,9 +264,6 @@
 		}
 	}
 
-	/* Let mktime() decide whether summer time is in effect. */
-	lt->tm_isdst = -1;
-
 	/* convert broken-down time to GMT clock time */
 	if ((tval = mktime(lt)) == -1)
 		errx(1, "nonexistent time");

==== //depot/projects/dtrace/src/etc/namedb/named.root#5 (text+ko) ====

@@ -1,5 +1,5 @@
 ;
-; $FreeBSD: src/etc/namedb/named.root,v 1.13 2007/11/02 22:37:15 dougb Exp $
+; $FreeBSD: src/etc/namedb/named.root,v 1.14 2008/02/07 06:28:02 dougb Exp $
 ;
 
 ;       This file holds the information on root name servers needed to
@@ -13,14 +13,14 @@
 ;           on server           FTP.INTERNIC.NET
 ;       -OR-                    RS.INTERNIC.NET
 ;
-;       last update:    Nov 01, 2007
-;       related version of root zone:   2007110100
-;
+;       last update:    Feb 04, 2008
+;       related version of root zone:   2008020400
 ;
 ; formerly NS.INTERNIC.NET
 ;
 .                        3600000  IN  NS    A.ROOT-SERVERS.NET.
 A.ROOT-SERVERS.NET.      3600000      A     198.41.0.4
+A.ROOT-SERVERS.NET.      3600000      AAAA  2001:503:BA3E::2:30
 ;
 ; formerly NS1.ISI.EDU
 ;
@@ -46,6 +46,7 @@
 ;
 .                        3600000      NS    F.ROOT-SERVERS.NET.
 F.ROOT-SERVERS.NET.      3600000      A     192.5.5.241
+F.ROOT-SERVERS.NET.      3600000      AAAA  2001:500:2f::f
 ;
 ; formerly NS.NIC.DDN.MIL
 ;
@@ -56,6 +57,7 @@
 ;
 .                        3600000      NS    H.ROOT-SERVERS.NET.
 H.ROOT-SERVERS.NET.      3600000      A     128.63.2.53
+H.ROOT-SERVERS.NET.      3600000      AAAA  2001:500:1::803f:235
 ;
 ; formerly NIC.NORDU.NET
 ;
@@ -66,11 +68,13 @@
 ;
 .                        3600000      NS    J.ROOT-SERVERS.NET.
 J.ROOT-SERVERS.NET.      3600000      A     192.58.128.30
+J.ROOT-SERVERS.NET.      3600000      AAAA  2001:503:C27::2:30
 ;
 ; operated by RIPE NCC
 ;
 .                        3600000      NS    K.ROOT-SERVERS.NET.
 K.ROOT-SERVERS.NET.      3600000      A     193.0.14.129
+K.ROOT-SERVERS.NET.      3600000      AAAA  2001:7fd::1
 ;
 ; operated by ICANN
 ;
@@ -81,4 +85,5 @@
 ;
 .                        3600000      NS    M.ROOT-SERVERS.NET.
 M.ROOT-SERVERS.NET.      3600000      A     202.12.27.33
+M.ROOT-SERVERS.NET.      3600000      AAAA  2001:dc3::35
 ; End of File

==== //depot/projects/dtrace/src/lib/libc/stdlib/malloc.c#18 (text+ko) ====

@@ -134,7 +134,7 @@
 #define	MALLOC_DSS
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.162 2008/02/06 02:59:54 jasone Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.164 2008/02/08 08:02:34 jasone Exp $");
 
 #include "libc_private.h"
 #ifdef MALLOC_DEBUG
@@ -315,7 +315,8 @@
     * trials (each deallocation is a trial), so the actual average threshold
     * for clearing the cache is somewhat lower.
     */
-#  define LAZY_FREE_NPROBES	5
+#  define LAZY_FREE_NPROBES_2POW_MIN	2
+#  define LAZY_FREE_NPROBES_2POW_MAX	3
 #endif
 
 /*
@@ -929,30 +930,24 @@
 static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
     size_t alloc_size);
 static size_t	arena_salloc(const void *ptr);
+#ifdef MALLOC_LAZY_FREE
+static void	arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind, arena_chunk_map_t *mapelm, unsigned slot);
+#endif
+static void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr);
 static void	arena_ralloc_resize_shrink(arena_t *arena, arena_chunk_t *chunk,
     void *ptr, size_t size, size_t oldsize);
 static bool	arena_ralloc_resize_grow(arena_t *arena, arena_chunk_t *chunk,
     void *ptr, size_t size, size_t oldsize);
 static bool	arena_ralloc_resize(void *ptr, size_t size, size_t oldsize);
 static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
-#ifdef MALLOC_LAZY_FREE
-static void	arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, size_t pageind, arena_chunk_map_t *mapelm);
-#endif
-static void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr);
 static bool	arena_new(arena_t *arena);
 static arena_t	*arenas_extend(unsigned ind);
 static void	*huge_malloc(size_t size, bool zero);
 static void	*huge_palloc(size_t alignment, size_t size);
 static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	huge_dalloc(void *ptr);
-static void	*imalloc(size_t size);
-static void	*ipalloc(size_t alignment, size_t size);
-static void	*icalloc(size_t size);
-static size_t	isalloc(const void *ptr);
-static void	*iralloc(void *ptr, size_t size);
-static void	idalloc(void *ptr);
 static void	malloc_print_stats(void);
 static bool	malloc_init_hard(void);
 
@@ -2312,6 +2307,7 @@
 			    == 0) {
 				memset((void *)((uintptr_t)chunk + ((run_ind
 				    + i) << pagesize_2pow)), 0, pagesize);
+				/* CHUNK_MAP_UNTOUCHED is cleared below. */
 			}
 		}
 
@@ -2379,6 +2375,8 @@
 		 * Initialize the map to contain one maximal free untouched
 		 * run.
 		 */
+		memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
+		    arena_chunk_header_npages);
 		memset(&chunk->map[arena_chunk_header_npages],
 		    CHUNK_MAP_UNTOUCHED, (chunk_npages -
 		    arena_chunk_header_npages));
@@ -2498,7 +2496,8 @@
 				if (chunk->map[i] & CHUNK_MAP_DIRTY) {
 					size_t npages;
 
-					chunk->map[i] = 0;
+					chunk->map[i] = (CHUNK_MAP_LARGE |
+					    CHUNK_MAP_POS_MASK);
 					chunk->ndirty--;
 					arena->ndirty--;
 					/* Find adjacent dirty run(s). */
@@ -2507,7 +2506,8 @@
 					    (chunk->map[i - 1] &
 					    CHUNK_MAP_DIRTY); npages++) {
 						i--;
-						chunk->map[i] = 0;
+						chunk->map[i] = (CHUNK_MAP_LARGE
+						    | CHUNK_MAP_POS_MASK);
 						chunk->ndirty--;
 						arena->ndirty--;
 					}
@@ -2556,7 +2556,9 @@
 		size_t i;
 
 		for (i = 0; i < run_pages; i++) {
-			chunk->map[run_ind + i] = CHUNK_MAP_DIRTY;
+			assert((chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) ==
+			    0);
+			chunk->map[run_ind + i] |= CHUNK_MAP_DIRTY;
 			chunk->ndirty++;
 			arena->ndirty++;
 		}
@@ -3005,6 +3007,28 @@
 		return (arena_malloc_large(arena, size, zero));
 }
 
+static inline void *
+imalloc(size_t size)
+{
+
+	assert(size != 0);
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(choose_arena(), size, false));
+	else
+		return (huge_malloc(size, false));
+}
+
+static inline void *
+icalloc(size_t size)
+{
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(choose_arena(), size, true));
+	else
+		return (huge_malloc(size, true));
+}
+
 /* Only handles large allocations that require more than page alignment. */
 static void *
 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
@@ -3084,6 +3108,101 @@
 	return (ret);
 }
 
+static inline void *
+ipalloc(size_t alignment, size_t size)
+{
+	void *ret;
+	size_t ceil_size;
+
+	/*
+	 * Round size up to the nearest multiple of alignment.
+	 *
+	 * This done, we can take advantage of the fact that for each small
+	 * size class, every object is aligned at the smallest power of two
+	 * that is non-zero in the base two representation of the size.  For
+	 * example:
+	 *
+	 *   Size |   Base 2 | Minimum alignment
+	 *   -----+----------+------------------
+	 *     96 |  1100000 |  32
+	 *    144 | 10100000 |  32
+	 *    192 | 11000000 |  64
+	 *
+	 * Depending on runtime settings, it is possible that arena_malloc()
+	 * will further round up to a power of two, but that never causes
+	 * correctness issues.
+	 */
+	ceil_size = (size + (alignment - 1)) & (-alignment);
+	/*
+	 * (ceil_size < size) protects against the combination of maximal
+	 * alignment and size greater than maximal alignment.
+	 */
+	if (ceil_size < size) {
+		/* size_t overflow. */
+		return (NULL);
+	}
+
+	if (ceil_size <= pagesize || (alignment <= pagesize
+	    && ceil_size <= arena_maxclass))
+		ret = arena_malloc(choose_arena(), ceil_size, false);
+	else {
+		size_t run_size;
+
+		/*
+		 * We can't achieve sub-page alignment, so round up alignment
+		 * permanently; it makes later calculations simpler.
+		 */
+		alignment = PAGE_CEILING(alignment);
+		ceil_size = PAGE_CEILING(size);
+		/*
+		 * (ceil_size < size) protects against very large sizes within
+		 * pagesize of SIZE_T_MAX.
+		 *
+		 * (ceil_size + alignment < ceil_size) protects against the
+		 * combination of maximal alignment and ceil_size large enough
+		 * to cause overflow.  This is similar to the first overflow
+		 * check above, but it needs to be repeated due to the new
+		 * ceil_size value, which may now be *equal* to maximal
+		 * alignment, whereas before we only detected overflow if the
+		 * original size was *greater* than maximal alignment.
+		 */
+		if (ceil_size < size || ceil_size + alignment < ceil_size) {
+			/* size_t overflow. */
+			return (NULL);
+		}
+
+		/*
+		 * Calculate the size of the over-size run that arena_palloc()
+		 * would need to allocate in order to guarantee the alignment.
+		 */
+		if (ceil_size >= alignment)
+			run_size = ceil_size + alignment - pagesize;
+		else {
+			/*
+			 * It is possible that (alignment << 1) will cause
+			 * overflow, but it doesn't matter because we also
+			 * subtract pagesize, which in the case of overflow
+			 * leaves us with a very large run_size.  That causes
+			 * the first conditional below to fail, which means
+			 * that the bogus run_size value never gets used for
+			 * anything important.
+			 */
+			run_size = (alignment << 1) - pagesize;
+		}
+
+		if (run_size <= arena_maxclass) {
+			ret = arena_palloc(choose_arena(), alignment, ceil_size,
+			    run_size);
+		} else if (alignment <= chunksize)
+			ret = huge_malloc(ceil_size, false);
+		else
+			ret = huge_palloc(alignment, ceil_size);
+	}
+
+	assert(((uintptr_t)ret & (alignment - 1)) == 0);
+	return (ret);
+}
+
 /* Return the size of the allocation pointed to by ptr. */
 static size_t
 arena_salloc(const void *ptr)
@@ -3099,12 +3218,11 @@
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
 	mapelm = chunk->map[pageind];
-	if (mapelm != CHUNK_MAP_LARGE) {
+	if ((mapelm & CHUNK_MAP_LARGE) == 0) {
 		arena_run_t *run;
 
 		/* Small allocation size is in the run header. */
-		assert(mapelm <= CHUNK_MAP_POS_MASK);
-		pageind -= mapelm;
+		pageind -= (mapelm & CHUNK_MAP_POS_MASK);
 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
 		    pagesize_2pow));
 		assert(run->magic == ARENA_RUN_MAGIC);
@@ -3127,166 +3245,38 @@
 	return (ret);
 }
 
-static void
-arena_ralloc_resize_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
+static inline size_t
+isalloc(const void *ptr)
 {
-	extent_node_t *node, key;
+	size_t ret;
+	arena_chunk_t *chunk;
 
-	assert(size < oldsize);
-
-	/*
-	 * Shrink the run, and make trailing pages available for other
-	 * allocations.
-	 */
-	key.addr = (void *)((uintptr_t)ptr);
-#ifdef MALLOC_BALANCE
-	arena_lock_balance(arena);
-#else
-	malloc_spin_lock(&arena->lock);
-#endif
-	node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-	assert(node != NULL);
-	arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
-	    size, true);
-#ifdef MALLOC_STATS
-	arena->stats.allocated_large -= oldsize - size;
-#endif
-	malloc_spin_unlock(&arena->lock);
-}
-
-static bool
-arena_ralloc_resize_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t size, size_t oldsize)
-{
-	extent_node_t *nodeC, key;
-
-	/* Try to extend the run. */
-	assert(size > oldsize);
-	key.addr = (void *)((uintptr_t)ptr + oldsize);
-#ifdef MALLOC_BALANCE
-	arena_lock_balance(arena);
-#else
-	malloc_spin_lock(&arena->lock);
-#endif
-	nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-	if (nodeC != NULL && oldsize + nodeC->size >= size) {
-		extent_node_t *nodeA, *nodeB;
-
-		/*
-		 * The next run is available and sufficiently large.  Split the
-		 * following run, then merge the first part with the existing
-		 * allocation.  This results in a bit more tree manipulation
-		 * than absolutely necessary, but it substantially simplifies
-		 * the code.
-		 */
-		arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
-		    oldsize, false, false);
-
-		key.addr = ptr;
-		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
-		    &key);
-		assert(nodeA != NULL);
-
-		key.addr = (void *)((uintptr_t)ptr + oldsize);
-		nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
-		    &key);
-		assert(nodeB != NULL);
-
-		nodeA->size += nodeB->size;
-
-		RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
-		arena_chunk_node_dealloc(chunk, nodeB);
-
-#ifdef MALLOC_STATS
-		arena->stats.allocated_large += size - oldsize;
-#endif
-		malloc_spin_unlock(&arena->lock);
-		return (false);
-	}
-	malloc_spin_unlock(&arena->lock);
-
-	return (true);
-}
+	assert(ptr != NULL);
 
-/*
- * Try to resize a large allocation, in order to avoid copying.  This will
- * always fail if growing an object, and the following run is already in use.
- */
-static bool
-arena_ralloc_resize(void *ptr, size_t size, size_t oldsize)
-{
-	arena_chunk_t *chunk;
-	arena_t *arena;
-
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	arena = chunk->arena;
-	assert(arena->magic == ARENA_MAGIC);
+	if (chunk != ptr) {
+		/* Region. */
+		assert(chunk->arena->magic == ARENA_MAGIC);
 
-	if (size < oldsize) {
-		arena_ralloc_resize_shrink(arena, chunk, ptr, size, oldsize);
-		return (false);
+		ret = arena_salloc(ptr);
 	} else {
-		return (arena_ralloc_resize_grow(arena, chunk, ptr, size,
-		    oldsize));
-	}
-}
+		extent_node_t *node, key;
 
-static void *
-arena_ralloc(void *ptr, size_t size, size_t oldsize)
-{
-	void *ret;
+		/* Chunk (huge allocation). */
 
-	/* Try to avoid moving the allocation. */
-	if (size < small_min) {
-		if (oldsize < small_min &&
-		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
-		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
-			goto IN_PLACE; /* Same size class. */
-	} else if (size <= small_max) {
-		if (oldsize >= small_min && oldsize <= small_max &&
-		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
-		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
-			goto IN_PLACE; /* Same size class. */
-	} else if (size <= bin_maxclass) {
-		if (oldsize > small_max && oldsize <= bin_maxclass &&
-		    pow2_ceil(size) == pow2_ceil(oldsize))
-			goto IN_PLACE; /* Same size class. */
-	} else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
-		size_t psize;
+		malloc_mutex_lock(&huge_mtx);
 
-		assert(size > bin_maxclass);
-		psize = PAGE_CEILING(size);
+		/* Extract from tree of huge allocations. */
+		key.addr = __DECONST(void *, ptr);
+		node = RB_FIND(extent_tree_ad_s, &huge, &key);
+		assert(node != NULL);
 
-		if (psize == oldsize)
-			goto IN_PLACE; /* Same size class. */
+		ret = node->size;
 
-		if (arena_ralloc_resize(ptr, psize, oldsize) == false)
-			goto IN_PLACE;
+		malloc_mutex_unlock(&huge_mtx);
 	}
 
-	/*
-	 * If we get here, then size and oldsize are different enough that we
-	 * need to move the object.  In that case, fall back to allocating new
-	 * space and copying.
-	 */
-	ret = arena_malloc(choose_arena(), size, false);
-	if (ret == NULL)
-		return (NULL);
-
-	/* Junk/zero-filling were already done by arena_malloc(). */
-	if (size < oldsize)
-		memcpy(ret, ptr, size);
-	else
-		memcpy(ret, ptr, oldsize);
-	idalloc(ptr);
 	return (ret);
-IN_PLACE:
-	if (opt_junk && size < oldsize)
-		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
-	else if (opt_zero && size > oldsize)
-		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
-	return (ptr);
 }
 
 static inline void
@@ -3297,8 +3287,7 @@
 	arena_bin_t *bin;
 	size_t size;
 
-	assert(mapelm <= CHUNK_MAP_POS_MASK);
-	pageind -= mapelm;
+	pageind -= (mapelm & CHUNK_MAP_POS_MASK);
 
 	run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
 	assert(run->magic == ARENA_RUN_MAGIC);
@@ -3360,7 +3349,7 @@
     size_t pageind, arena_chunk_map_t *mapelm)
 {
 	void **free_cache = arena->free_cache;
-	unsigned i, slot;
+	unsigned i, nprobes, slot;
 
 	if (__isthreaded == false || opt_lazy_free_2pow < 0) {
 		malloc_spin_lock(&arena->lock);
@@ -3369,7 +3358,9 @@
 		return;
 	}
 
-	for (i = 0; i < LAZY_FREE_NPROBES; i++) {
+	nprobes = (1U << LAZY_FREE_NPROBES_2POW_MIN) + PRN(lazy_free,
+	    (LAZY_FREE_NPROBES_2POW_MAX - LAZY_FREE_NPROBES_2POW_MIN));
+	for (i = 0; i < nprobes; i++) {
 		slot = PRN(lazy_free, opt_lazy_free_2pow);
 		if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
 		    (uintptr_t)NULL, (uintptr_t)ptr)) {
@@ -3377,15 +3368,15 @@
 		}
 	}
 
-	arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm);
+	arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm, slot);
 }
 
 static void
 arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t *mapelm)
+    size_t pageind, arena_chunk_map_t *mapelm, unsigned slot)
 {
 	void **free_cache = arena->free_cache;
-	unsigned i, slot;
+	unsigned i;
 
 	malloc_spin_lock(&arena->lock);
 	arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
@@ -3486,9 +3477,8 @@
 
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
 	mapelm = &chunk->map[pageind];
-	if (*mapelm != CHUNK_MAP_LARGE) {
+	if ((*mapelm & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-		assert(*mapelm <= CHUNK_MAP_POS_MASK);
 #ifdef MALLOC_LAZY_FREE
 		arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
 #else
@@ -3502,6 +3492,197 @@
 	}
 }
 
+static inline void
+idalloc(void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr)
+		arena_dalloc(chunk->arena, chunk, ptr);
+	else
+		huge_dalloc(ptr);
+}
+
+static void
+arena_ralloc_resize_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t size, size_t oldsize)
+{
+	extent_node_t *node, key;
+
+	assert(size < oldsize);
+
+	/*
+	 * Shrink the run, and make trailing pages available for other
+	 * allocations.
+	 */
+	key.addr = (void *)((uintptr_t)ptr);
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+	assert(node != NULL);
+	arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
+	    size, true);
+#ifdef MALLOC_STATS
+	arena->stats.allocated_large -= oldsize - size;
+#endif
+	malloc_spin_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_resize_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t size, size_t oldsize)
+{
+	extent_node_t *nodeC, key;
+
+	/* Try to extend the run. */
+	assert(size > oldsize);
+	key.addr = (void *)((uintptr_t)ptr + oldsize);
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	if (nodeC != NULL && oldsize + nodeC->size >= size) {
+		extent_node_t *nodeA, *nodeB;
+
+		/*
+		 * The next run is available and sufficiently large.  Split the
+		 * following run, then merge the first part with the existing
+		 * allocation.  This results in a bit more tree manipulation
+		 * than absolutely necessary, but it substantially simplifies
+		 * the code.
+		 */
+		arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
+		    oldsize, false, false);
+
+		key.addr = ptr;
+		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+		    &key);
+		assert(nodeA != NULL);
+
+		key.addr = (void *)((uintptr_t)ptr + oldsize);
+		nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+		    &key);
+		assert(nodeB != NULL);
+
+		nodeA->size += nodeB->size;
+
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+		arena_chunk_node_dealloc(chunk, nodeB);
+
+#ifdef MALLOC_STATS
+		arena->stats.allocated_large += size - oldsize;
+#endif
+		malloc_spin_unlock(&arena->lock);
+		return (false);
+	}
+	malloc_spin_unlock(&arena->lock);
+
+	return (true);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This will
+ * always fail if growing an object, and the following run is already in use.

>>> TRUNCATED FOR MAIL (1000 lines) <<<


