Date:      Mon, 17 Feb 2014 17:12:11 +0000 (UTC)
From:      Andriy Gapon <avg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r262095 - in stable/8: cddl/contrib/opensolaris/cmd/zdb cddl/contrib/opensolaris/cmd/zpool cddl/contrib/opensolaris/cmd/ztest sys/cddl/contrib/opensolaris/common/zfs sys/cddl/contrib/op...
Message-ID:  <201402171712.s1HHCCxb055774@svn.freebsd.org>

Author: avg
Date: Mon Feb 17 17:12:11 2014
New Revision: 262095
URL: http://svnweb.freebsd.org/changeset/base/262095

Log:
  MFC r258717: MFV r258371,r258372: 4101 metaslab_debug should allow for
  fine-grained control
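
  This change replaces the old all-or-nothing vfs.zfs.metaslab.debug tunable
  with two finer-grained sysctls, both defined in the metaslab.c hunks below.
  A minimal usage sketch (values illustrative only):

    sysctl vfs.zfs.metaslab.debug_load=1    # load all metaslabs at pool open
    sysctl vfs.zfs.metaslab.debug_unload=1  # prevent metaslabs from unloading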

Added:
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c
     - copied unchanged from r258717, head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_reftree.c
     - copied unchanged from r258717, head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_reftree.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/range_tree.h
     - copied unchanged from r258717, head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/range_tree.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/space_reftree.h
     - copied unchanged from r258717, head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/space_reftree.h
Modified:
  stable/8/cddl/contrib/opensolaris/cmd/zdb/zdb.c
  stable/8/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7
  stable/8/cddl/contrib/opensolaris/cmd/ztest/ztest.c
  stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.c
  stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/Makefile.files
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/space_map.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/vdev_impl.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfeature.h
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c
  stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfeature.c
Directory Properties:
  stable/8/cddl/contrib/opensolaris/   (props changed)
  stable/8/cddl/contrib/opensolaris/cmd/zfs/   (props changed)
  stable/8/cddl/contrib/opensolaris/lib/libzfs/   (props changed)
  stable/8/sys/   (props changed)
  stable/8/sys/cddl/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)

Modified: stable/8/cddl/contrib/opensolaris/cmd/zdb/zdb.c
==============================================================================
--- stable/8/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Mon Feb 17 17:12:11 2014	(r262095)
@@ -242,7 +242,7 @@ const char histo_stars[] = "************
 const int histo_width = sizeof (histo_stars) - 1;
 
 static void
-dump_histogram(const uint64_t *histo, int size)
+dump_histogram(const uint64_t *histo, int size, int offset)
 {
 	int i;
 	int minidx = size - 1;
@@ -263,7 +263,7 @@ dump_histogram(const uint64_t *histo, in
 
 	for (i = minidx; i <= maxidx; i++) {
 		(void) printf("\t\t\t%3u: %6llu %s\n",
-		    i, (u_longlong_t)histo[i],
+		    i + offset, (u_longlong_t)histo[i],
 		    &histo_stars[(max - histo[i]) * histo_width / max]);
 	}
 }
@@ -316,19 +316,19 @@ dump_zap_stats(objset_t *os, uint64_t ob
 	    (u_longlong_t)zs.zs_salt);
 
 	(void) printf("\t\tLeafs with 2^n pointers:\n");
-	dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE);
+	dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
 
 	(void) printf("\t\tBlocks with n*5 entries:\n");
-	dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE);
+	dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
 
 	(void) printf("\t\tBlocks n/10 full:\n");
-	dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE);
+	dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
 
 	(void) printf("\t\tEntries with n chunks:\n");
-	dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE);
+	dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
 
 	(void) printf("\t\tBuckets with n entries:\n");
-	dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE);
+	dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
 }
 
 /*ARGSUSED*/
@@ -517,26 +517,85 @@ dump_zpldir(objset_t *os, uint64_t objec
 	zap_cursor_fini(&zc);
 }
 
+int
+get_dtl_refcount(vdev_t *vd)
+{
+	int refcount = 0;
+
+	if (vd->vdev_ops->vdev_op_leaf) {
+		space_map_t *sm = vd->vdev_dtl_sm;
+
+		if (sm != NULL &&
+		    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
+			return (1);
+		return (0);
+	}
+
+	for (int c = 0; c < vd->vdev_children; c++)
+		refcount += get_dtl_refcount(vd->vdev_child[c]);
+	return (refcount);
+}
+
+int
+get_metaslab_refcount(vdev_t *vd)
+{
+	int refcount = 0;
+
+	if (vd->vdev_top == vd) {
+		for (int m = 0; m < vd->vdev_ms_count; m++) {
+			space_map_t *sm = vd->vdev_ms[m]->ms_sm;
+
+			if (sm != NULL &&
+			    sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
+				refcount++;
+		}
+	}
+	for (int c = 0; c < vd->vdev_children; c++)
+		refcount += get_metaslab_refcount(vd->vdev_child[c]);
+
+	return (refcount);
+}
+
+static int
+verify_spacemap_refcounts(spa_t *spa)
+{
+	int expected_refcount, actual_refcount;
+
+	expected_refcount = spa_feature_get_refcount(spa,
+	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM]);
+	actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
+	actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
+
+	if (expected_refcount != actual_refcount) {
+		(void) printf("space map refcount mismatch: expected %d != "
+		    "actual %d\n", expected_refcount, actual_refcount);
+		return (2);
+	}
+	return (0);
+}
+
 static void
-dump_spacemap(objset_t *os, space_map_obj_t *smo, space_map_t *sm)
+dump_spacemap(objset_t *os, space_map_t *sm)
 {
 	uint64_t alloc, offset, entry;
-	uint8_t mapshift = sm->sm_shift;
-	uint64_t mapstart = sm->sm_start;
 	char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
 			    "INVALID", "INVALID", "INVALID", "INVALID" };
 
-	if (smo->smo_object == 0)
+	if (sm == NULL)
 		return;
 
 	/*
 	 * Print out the freelist entries in both encoded and decoded form.
 	 */
 	alloc = 0;
-	for (offset = 0; offset < smo->smo_objsize; offset += sizeof (entry)) {
-		VERIFY3U(0, ==, dmu_read(os, smo->smo_object, offset,
+	for (offset = 0; offset < space_map_length(sm);
+	    offset += sizeof (entry)) {
+		uint8_t mapshift = sm->sm_shift;
+
+		VERIFY0(dmu_read(os, space_map_object(sm), offset,
 		    sizeof (entry), &entry, DMU_READ_PREFETCH));
 		if (SM_DEBUG_DECODE(entry)) {
+
 			(void) printf("\t    [%6llu] %s: txg %llu, pass %llu\n",
 			    (u_longlong_t)(offset / sizeof (entry)),
 			    ddata[SM_DEBUG_ACTION_DECODE(entry)],
@@ -548,10 +607,10 @@ dump_spacemap(objset_t *os, space_map_ob
 			    (u_longlong_t)(offset / sizeof (entry)),
 			    SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F',
 			    (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
-			    mapshift) + mapstart),
+			    mapshift) + sm->sm_start),
 			    (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
-			    mapshift) + mapstart + (SM_RUN_DECODE(entry) <<
-			    mapshift)),
+			    mapshift) + sm->sm_start +
+			    (SM_RUN_DECODE(entry) << mapshift)),
 			    (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift));
 			if (SM_TYPE_DECODE(entry) == SM_ALLOC)
 				alloc += SM_RUN_DECODE(entry) << mapshift;
@@ -559,10 +618,10 @@ dump_spacemap(objset_t *os, space_map_ob
 				alloc -= SM_RUN_DECODE(entry) << mapshift;
 		}
 	}
-	if (alloc != smo->smo_alloc) {
+	if (alloc != space_map_allocated(sm)) {
 		(void) printf("space_map_object alloc (%llu) INCONSISTENT "
 		    "with space map summary (%llu)\n",
-		    (u_longlong_t)smo->smo_alloc, (u_longlong_t)alloc);
+		    (u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc);
 	}
 }
 
@@ -570,15 +629,17 @@ static void
 dump_metaslab_stats(metaslab_t *msp)
 {
 	char maxbuf[32];
-	space_map_t *sm = msp->ms_map;
-	avl_tree_t *t = sm->sm_pp_root;
-	int free_pct = sm->sm_space * 100 / sm->sm_size;
+	range_tree_t *rt = msp->ms_tree;
+	avl_tree_t *t = &msp->ms_size_tree;
+	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
 
-	zdb_nicenum(space_map_maxsize(sm), maxbuf);
+	zdb_nicenum(metaslab_block_maxsize(msp), maxbuf);
 
 	(void) printf("\t %25s %10lu   %7s  %6s   %4s %4d%%\n",
 	    "segments", avl_numnodes(t), "maxsize", maxbuf,
 	    "freepct", free_pct);
+	(void) printf("\tIn-memory histogram:\n");
+	dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
 }
 
 static void
@@ -586,33 +647,45 @@ dump_metaslab(metaslab_t *msp)
 {
 	vdev_t *vd = msp->ms_group->mg_vd;
 	spa_t *spa = vd->vdev_spa;
-	space_map_t *sm = msp->ms_map;
-	space_map_obj_t *smo = &msp->ms_smo;
+	space_map_t *sm = msp->ms_sm;
 	char freebuf[32];
 
-	zdb_nicenum(sm->sm_size - smo->smo_alloc, freebuf);
+	zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf);
 
 	(void) printf(
 	    "\tmetaslab %6llu   offset %12llx   spacemap %6llu   free    %5s\n",
-	    (u_longlong_t)(sm->sm_start / sm->sm_size),
-	    (u_longlong_t)sm->sm_start, (u_longlong_t)smo->smo_object, freebuf);
+	    (u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
+	    (u_longlong_t)space_map_object(sm), freebuf);
 
-	if (dump_opt['m'] > 1 && !dump_opt['L']) {
+	if (dump_opt['m'] > 2 && !dump_opt['L']) {
 		mutex_enter(&msp->ms_lock);
-		space_map_load_wait(sm);
-		if (!sm->sm_loaded)
-			VERIFY(space_map_load(sm, zfs_metaslab_ops,
-			    SM_FREE, smo, spa->spa_meta_objset) == 0);
+		metaslab_load_wait(msp);
+		if (!msp->ms_loaded) {
+			VERIFY0(metaslab_load(msp));
+			range_tree_stat_verify(msp->ms_tree);
+		}
 		dump_metaslab_stats(msp);
-		space_map_unload(sm);
+		metaslab_unload(msp);
 		mutex_exit(&msp->ms_lock);
 	}
 
-	if (dump_opt['d'] > 5 || dump_opt['m'] > 2) {
-		ASSERT(sm->sm_size == (1ULL << vd->vdev_ms_shift));
+	if (dump_opt['m'] > 1 && sm != NULL &&
+	    spa_feature_is_active(spa,
+	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM])) {
+		/*
+		 * The space map histogram represents free space in chunks
+		 * of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
+		 */
+		(void) printf("\tOn-disk histogram:\n");
+		dump_histogram(sm->sm_phys->smp_histogram,
+		    SPACE_MAP_HISTOGRAM_SIZE(sm), sm->sm_shift);
+	}
+
+	if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
+		ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
 
 		mutex_enter(&msp->ms_lock);
-		dump_spacemap(spa->spa_meta_objset, smo, sm);
+		dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
 		mutex_exit(&msp->ms_lock);
 	}
 }
@@ -801,9 +874,9 @@ dump_all_ddts(spa_t *spa)
 }
 
 static void
-dump_dtl_seg(space_map_t *sm, uint64_t start, uint64_t size)
+dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
 {
-	char *prefix = (void *)sm;
+	char *prefix = arg;
 
 	(void) printf("%s [%llu,%llu) length %llu\n",
 	    prefix,
@@ -833,17 +906,16 @@ dump_dtl(vdev_t *vd, int indent)
 	    required ? "DTL-required" : "DTL-expendable");
 
 	for (int t = 0; t < DTL_TYPES; t++) {
-		space_map_t *sm = &vd->vdev_dtl[t];
-		if (sm->sm_space == 0)
+		range_tree_t *rt = vd->vdev_dtl[t];
+		if (range_tree_space(rt) == 0)
 			continue;
 		(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
 		    indent + 2, "", name[t]);
-		mutex_enter(sm->sm_lock);
-		space_map_walk(sm, dump_dtl_seg, (void *)prefix);
-		mutex_exit(sm->sm_lock);
+		mutex_enter(rt->rt_lock);
+		range_tree_walk(rt, dump_dtl_seg, prefix);
+		mutex_exit(rt->rt_lock);
 		if (dump_opt['d'] > 5 && vd->vdev_children == 0)
-			dump_spacemap(spa->spa_meta_objset,
-			    &vd->vdev_dtl_smo, sm);
+			dump_spacemap(spa->spa_meta_objset, vd->vdev_dtl_sm);
 	}
 
 	for (int c = 0; c < vd->vdev_children; c++)
@@ -2172,39 +2244,17 @@ zdb_blkptr_cb(spa_t *spa, zilog_t *zilog
 }
 
 static void
-zdb_leak(space_map_t *sm, uint64_t start, uint64_t size)
+zdb_leak(void *arg, uint64_t start, uint64_t size)
 {
-	vdev_t *vd = sm->sm_ppd;
+	vdev_t *vd = arg;
 
 	(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
 	    (u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
 }
 
-/* ARGSUSED */
-static void
-zdb_space_map_load(space_map_t *sm)
-{
-}
-
-static void
-zdb_space_map_unload(space_map_t *sm)
-{
-	space_map_vacate(sm, zdb_leak, sm);
-}
-
-/* ARGSUSED */
-static void
-zdb_space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
-{
-}
-
-static space_map_ops_t zdb_space_map_ops = {
-	zdb_space_map_load,
-	zdb_space_map_unload,
+static metaslab_ops_t zdb_metaslab_ops = {
 	NULL,	/* alloc */
-	zdb_space_map_claim,
-	NULL,	/* free */
-	NULL	/* maxsize */
+	NULL	/* fragmented */
 };
 
 static void
@@ -2259,11 +2309,21 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
 			for (int m = 0; m < vd->vdev_ms_count; m++) {
 				metaslab_t *msp = vd->vdev_ms[m];
 				mutex_enter(&msp->ms_lock);
-				space_map_unload(msp->ms_map);
-				VERIFY(space_map_load(msp->ms_map,
-				    &zdb_space_map_ops, SM_ALLOC, &msp->ms_smo,
-				    spa->spa_meta_objset) == 0);
-				msp->ms_map->sm_ppd = vd;
+				metaslab_unload(msp);
+
+				/*
+				 * For leak detection, we overload the metaslab
+				 * ms_tree to contain allocated segments
+				 * instead of free segments. As a result,
+				 * we can't use the normal metaslab_load/unload
+				 * interfaces.
+				 */
+				if (msp->ms_sm != NULL) {
+					msp->ms_ops = &zdb_metaslab_ops;
+					VERIFY0(space_map_load(msp->ms_sm,
+					    msp->ms_tree, SM_ALLOC));
+					msp->ms_loaded = B_TRUE;
+				}
 				mutex_exit(&msp->ms_lock);
 			}
 		}
@@ -2286,7 +2346,20 @@ zdb_leak_fini(spa_t *spa)
 			for (int m = 0; m < vd->vdev_ms_count; m++) {
 				metaslab_t *msp = vd->vdev_ms[m];
 				mutex_enter(&msp->ms_lock);
-				space_map_unload(msp->ms_map);
+
+				/*
+				 * The ms_tree has been overloaded to
+				 * contain allocated segments. Now that we
+				 * have finished traversing all blocks, any
+				 * block that remains in the ms_tree
+				 * represents an allocated block that we
+				 * did not claim during the traversal.
+				 * Claimed blocks would have been removed
+				 * from the ms_tree.
+				 */
+				range_tree_vacate(msp->ms_tree, zdb_leak, vd);
+				msp->ms_loaded = B_FALSE;
+
 				mutex_exit(&msp->ms_lock);
 			}
 		}
@@ -2489,7 +2562,7 @@ dump_block_stats(spa_t *spa)
 					    "(in 512-byte sectors): "
 					    "number of blocks\n");
 					dump_histogram(zb->zb_psize_histogram,
-					    PSIZE_HISTO_SIZE);
+					    PSIZE_HISTO_SIZE, 0);
 				}
 			}
 		}
@@ -2659,6 +2732,9 @@ dump_zpool(spa_t *spa)
 	if (dump_opt['b'] || dump_opt['c'])
 		rc = dump_block_stats(spa);
 
+	if (rc == 0)
+		rc = verify_spacemap_refcounts(spa);
+
 	if (dump_opt['s'])
 		show_pool_stats(spa);
 

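Throughout the zdb.c changes above, dump_histogram() gains an offset
argument: the on-disk space map histogram introduced by this feature is
bucketed in units of 2^(sm_shift + i), so dump_metaslab() passes sm_shift
while every other call site passes 0. A self-contained sketch of the same
printing scheme, using hypothetical sample counts rather than code from the
commit:

    #include <stdio.h>
    #include <stdint.h>

    static const char stars[] = "****************************************";
    static const int width = sizeof (stars) - 1;

    static void
    dump_histogram_sketch(const uint64_t *histo, int size, int offset)
    {
            uint64_t max = 1;

            for (int i = 0; i < size; i++) {
                    if (histo[i] > max)
                            max = histo[i];
            }
            for (int i = 0; i < size; i++) {
                    if (histo[i] == 0)
                            continue;
                    /* bucket i counts free segments of size 2^(i + offset) */
                    (void) printf("%3d: %6llu %s\n", i + offset,
                        (unsigned long long)histo[i],
                        &stars[(max - histo[i]) * width / max]);
            }
    }

    int
    main(void)
    {
            uint64_t histo[] = { 12, 30, 7, 0, 2 };  /* hypothetical counts */

            dump_histogram_sketch(histo, 5, 9);      /* 9 = assumed sm_shift */
            return (0);
    }
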
Modified: stable/8/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7
==============================================================================
--- stable/8/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7	Mon Feb 17 17:12:11 2014	(r262095)
@@ -23,7 +23,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd September 20, 2013
+.Dd October 08, 2013
 .Dt ZPOOL-FEATURES 7
 .Os
 .Sh NAME
@@ -251,6 +251,24 @@ configuration.
 .\" .Xr dumpon 8
 .\" command to configure a
 .\" dump device on a pool comprised of multiple vdevs.
+.It Sy spacemap_histogram
+.Bl -column "READ\-ONLY COMPATIBLE" "com.delphix:spacemap_histogram"
+.It GUID Ta com.delphix:spacemap_histogram
+.It READ\-ONLY COMPATIBLE Ta yes
+.It DEPENDENCIES Ta none
+.El
+.Pp
+This feature allows ZFS to maintain more information about how free space
+is organized within the pool. If this feature is
+.Sy enabled ,
+ZFS will
+set this feature to
+.Sy active
+when a new space map object is created or
+an existing space map is upgraded to the new format.
+Once the feature is
+.Sy active ,
+it will remain in that state until the pool is destroyed.
 .El
 .Sh SEE ALSO
 .Xr zpool 8
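
Once this code is in use, the feature's state can be read back as a pool
property. A hypothetical session against a pool named "tank" (column
spacing abbreviated):

    # zpool get feature@spacemap_histogram tank
    NAME  PROPERTY                    VALUE    SOURCE
    tank  feature@spacemap_histogram  active   local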

Modified: stable/8/cddl/contrib/opensolaris/cmd/ztest/ztest.c
==============================================================================
--- stable/8/cddl/contrib/opensolaris/cmd/ztest/ztest.c	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/cddl/contrib/opensolaris/cmd/ztest/ztest.c	Mon Feb 17 17:12:11 2014	(r262095)
@@ -5339,7 +5339,7 @@ ztest_deadman_thread(void *arg)
 		 * vdev_deadman() discovers that there hasn't been any recent
 		 * I/Os then it will end up aborting the tests.
 		 */
-		if (spa_suspended(spa)) {
+		if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
 			fatal(0, "aborting test after %llu seconds because "
 			    "pool has transitioned to a suspended state.",
 			    zfs_deadman_synctime_ms / 1000);

Modified: stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.c
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.c	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.c	Mon Feb 17 17:12:11 2014	(r262095)
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
  */
@@ -163,4 +163,7 @@ zpool_feature_init(void)
 	zfeature_register(SPA_FEATURE_MULTI_VDEV_CRASH_DUMP,
 	    "com.joyent:multi_vdev_crash_dump", "multi_vdev_crash_dump",
 	    "Crash dumps to multiple vdev pools.", B_FALSE, B_FALSE, NULL);
+	zfeature_register(SPA_FEATURE_SPACEMAP_HISTOGRAM,
+	    "com.delphix:spacemap_histogram", "spacemap_histogram",
+	    "Spacemaps maintain space histograms.", B_TRUE, B_FALSE, NULL);
 }

Modified: stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.h
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.h	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/sys/cddl/contrib/opensolaris/common/zfs/zfeature_common.h	Mon Feb 17 17:12:11 2014	(r262095)
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
  */
@@ -55,6 +55,7 @@ static enum spa_feature {
 	SPA_FEATURE_EMPTY_BPOBJ,
 	SPA_FEATURE_LZ4_COMPRESS,
 	SPA_FEATURE_MULTI_VDEV_CRASH_DUMP,
+	SPA_FEATURE_SPACEMAP_HISTOGRAM,
 	SPA_FEATURES
 } spa_feature_t;
 

Modified: stable/8/sys/cddl/contrib/opensolaris/uts/common/Makefile.files
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/uts/common/Makefile.files	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/sys/cddl/contrib/opensolaris/uts/common/Makefile.files	Mon Feb 17 17:12:11 2014	(r262095)
@@ -21,7 +21,8 @@
 
 #
 # Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2012 by Delphix. All rights reserved.
+# Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
+# Copyright (c) 2013 by Delphix. All rights reserved.
 # Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 #
 #
@@ -62,6 +63,7 @@ ZFS_COMMON_OBJS +=		\
 	lz4.o			\
 	lzjb.o			\
 	metaslab.o		\
+	range_tree.o		\
 	refcount.o		\
 	rrwlock.o		\
 	sa.o			\
@@ -72,6 +74,7 @@ ZFS_COMMON_OBJS +=		\
 	spa_history.o		\
 	spa_misc.o		\
 	space_map.o		\
+	space_reftree.o		\
 	txg.o			\
 	uberblock.o		\
 	unique.o		\

Modified: stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c	Mon Feb 17 17:12:11 2014	(r262095)
@@ -1339,7 +1339,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t si
 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
 
 	/* Check for any allocated blocks beyond the first */
-	if (dn->dn_phys->dn_maxblkid != 0)
+	if (dn->dn_maxblkid != 0)
 		goto fail;
 
 	mutex_enter(&dn->dn_dbufs_mtx);
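
The dnode.c fix above switches the single-block check from the on-disk
dn_phys->dn_maxblkid to the in-memory dn_maxblkid, which is updated as soon
as a block past the first is dirtied, before anything is synced out; testing
only the on-disk copy could let dnode_set_blksz() proceed while a second
block already exists in an open txg. A minimal sketch of the invariant,
using a hypothetical struct rather than the real dnode_t:

    #include <assert.h>
    #include <stdint.h>

    struct dnode_sketch {
            uint64_t dn_maxblkid;           /* in-memory, sees dirty blocks */
            uint64_t dn_phys_maxblkid;      /* on-disk, updated at sync time */
    };

    static int
    can_change_blocksize(const struct dnode_sketch *dn)
    {
            /* must use the in-memory value, or a dirty block is missed */
            return (dn->dn_maxblkid == 0);
    }

    int
    main(void)
    {
            /* block 1 was dirtied this txg but has not been synced yet */
            struct dnode_sketch dn = { 1, 0 };

            assert(!can_change_blocksize(&dn));
            return (0);
    }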

Modified: stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
==============================================================================
--- stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Mon Feb 17 17:11:58 2014	(r262094)
+++ stable/8/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c	Mon Feb 17 17:12:11 2014	(r262095)
@@ -31,6 +31,7 @@
 #include <sys/metaslab_impl.h>
 #include <sys/vdev_impl.h>
 #include <sys/zio.h>
+#include <sys/spa_impl.h>
 
 SYSCTL_DECL(_vfs_zfs);
 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
@@ -47,6 +48,11 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab
 	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
 	METASLAB_GANG_AVOID)))
 
+#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
+#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
+#define	METASLAB_ACTIVE_MASK		\
+	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
+
 uint64_t metaslab_aliquot = 512ULL << 10;
 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
 TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
@@ -61,6 +67,11 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO,
  * Values should be greater than or equal to 100.
  */
 int zfs_condense_pct = 200;
+TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
+SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
+    &zfs_condense_pct, 0,
+    "Condense on-disk spacemap when it exceeds this percentage"
+    " of the in-memory counterpart");
 
 /*
  * This value defines the number of allowed allocation failures per vdev.
@@ -88,15 +99,29 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_
  * no metaslab group will be excluded based on this criterion.
  */
 int zfs_mg_noalloc_threshold = 0;
+TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
+SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
+    &zfs_mg_noalloc_threshold, 0,
+    "Percentage of metaslab group size that should be free"
+    " to make it eligible for allocation");
 
 /*
- * Metaslab debugging: when set, keeps all space maps in core to verify frees.
+ * When set, all metaslabs will be loaded when the pool is first opened.
  */
-static int metaslab_debug = 0;
-TUNABLE_INT("vfs.zfs.metaslab.debug", &metaslab_debug);
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug, CTLFLAG_RWTUN, &metaslab_debug,
-    0,
-    "Metaslab debugging: when set, keeps all space maps in core to verify frees");
+int metaslab_debug_load = 0;
+TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
+    &metaslab_debug_load, 0,
+    "Load all metaslabs when pool is first opened");
+
+/*
+ * When set, prevents metaslabs from being unloaded.
+ */
+int metaslab_debug_unload = 0;
+TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
+    &metaslab_debug_unload, 0,
+    "Prevent metaslabs from being unloaded");
 
 /*
  * Minimum size which forces the dynamic allocator to change
@@ -135,20 +160,24 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO,
     "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
 
 /*
- * Max number of space_maps to prefetch.
- */
-int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
-TUNABLE_INT("vfs.zfs.metaslab.prefetch_limit", &metaslab_prefetch_limit);
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, prefetch_limit, CTLFLAG_RWTUN,
-    &metaslab_prefetch_limit, 0, "Maximum number of space_maps to prefetch");
-
-/*
- * Percentage bonus multiplier for metaslabs that are in the bonus area.
+ * Percentage of all cpus that can be used by the metaslab taskq.
  */
-int metaslab_smo_bonus_pct = 150;
-TUNABLE_INT("vfs.zfs.metaslab.smo_bonus_pct", &metaslab_smo_bonus_pct);
-SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, smo_bonus_pct, CTLFLAG_RWTUN,
-    &metaslab_smo_bonus_pct, 0, "Maximum number of space_maps to prefetch");
+int metaslab_load_pct = 50;
+TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
+    &metaslab_load_pct, 0,
+    "Percentage of cpus that can be used by the metaslab taskq");
+
+/*
+ * Determines how many txgs a metaslab may remain loaded without having any
+ * allocations from it. As long as a metaslab continues to be used we will
+ * keep it loaded.
+ */
+int metaslab_unload_delay = TXG_SIZE * 2;
+TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
+    &metaslab_unload_delay, 0,
+    "Number of TXGs that an unused metaslab can be kept in memory");
 
 /*
  * Should we be willing to write data to degraded vdevs?
@@ -160,12 +189,41 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_
 TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
 
 /*
+ * Max number of metaslabs per group to preload.
+ */
+int metaslab_preload_limit = SPA_DVAS_PER_BP;
+TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
+    &metaslab_preload_limit, 0,
+    "Max number of metaslabs per group to preload");
+
+/*
+ * Enable/disable preloading of metaslabs.
+ */
+boolean_t metaslab_preload_enabled = B_TRUE;
+TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
+    &metaslab_preload_enabled, 0,
+    "Enable/disable preloading of metaslabs");
+
+/*
+ * Enable/disable additional weight factor for each metaslab.
+ */
+boolean_t metaslab_weight_factor_enable = B_FALSE;
+TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
+    &metaslab_weight_factor_enable);
+SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
+    &metaslab_weight_factor_enable, 0,
+    "Enable additional weight factor for each metaslab");
+
+
+/*
  * ==========================================================================
  * Metaslab classes
  * ==========================================================================
  */
 metaslab_class_t *
-metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
+metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
 {
 	metaslab_class_t *mc;
 
@@ -296,9 +354,9 @@ metaslab_compare(const void *x1, const v
 	/*
 	 * If the weights are identical, use the offset to force uniqueness.
 	 */
-	if (m1->ms_map->sm_start < m2->ms_map->sm_start)
+	if (m1->ms_start < m2->ms_start)
 		return (-1);
-	if (m1->ms_map->sm_start > m2->ms_map->sm_start)
+	if (m1->ms_start > m2->ms_start)
 		return (1);
 
 	ASSERT3P(m1, ==, m2);
@@ -366,6 +424,9 @@ metaslab_group_create(metaslab_class_t *
 	mg->mg_class = mc;
 	mg->mg_activation_count = 0;
 
+	mg->mg_taskq = taskq_create("metaslab_group_tasksq", metaslab_load_pct,
+	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
+
 	return (mg);
 }
 
@@ -435,6 +496,8 @@ metaslab_group_passivate(metaslab_group_
 		return;
 	}
 
+	taskq_wait(mg->mg_taskq);
+
 	mgprev = mg->mg_prev;
 	mgnext = mg->mg_next;
 
@@ -515,130 +578,200 @@ metaslab_group_allocatable(metaslab_grou
 
 /*
  * ==========================================================================
- * Common allocator routines
+ * Range tree callbacks
  * ==========================================================================
  */
+
+/*
+ * Comparison function for the private size-ordered tree. Tree is sorted
+ * by size, larger sizes at the end of the tree.
+ */
 static int
-metaslab_segsize_compare(const void *x1, const void *x2)
+metaslab_rangesize_compare(const void *x1, const void *x2)
 {
-	const space_seg_t *s1 = x1;
-	const space_seg_t *s2 = x2;
-	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
-	uint64_t ss_size2 = s2->ss_end - s2->ss_start;
+	const range_seg_t *r1 = x1;
+	const range_seg_t *r2 = x2;
+	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
+	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
 
-	if (ss_size1 < ss_size2)
+	if (rs_size1 < rs_size2)
 		return (-1);
-	if (ss_size1 > ss_size2)
+	if (rs_size1 > rs_size2)
 		return (1);
 
-	if (s1->ss_start < s2->ss_start)
+	if (r1->rs_start < r2->rs_start)
 		return (-1);
-	if (s1->ss_start > s2->ss_start)
+
+	if (r1->rs_start > r2->rs_start)
 		return (1);
 
 	return (0);
 }
 
 /*
- * This is a helper function that can be used by the allocator to find
- * a suitable block to allocate. This will search the specified AVL
- * tree looking for a block that matches the specified criteria.
+ * Create any block allocator specific components. The current allocators
+ * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
  */
-static uint64_t
-metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
-    uint64_t align)
+static void
+metaslab_rt_create(range_tree_t *rt, void *arg)
 {
-	space_seg_t *ss, ssearch;
-	avl_index_t where;
-
-	ssearch.ss_start = *cursor;
-	ssearch.ss_end = *cursor + size;
+	metaslab_t *msp = arg;
 
-	ss = avl_find(t, &ssearch, &where);
-	if (ss == NULL)
-		ss = avl_nearest(t, where, AVL_AFTER);
+	ASSERT3P(rt->rt_arg, ==, msp);
+	ASSERT(msp->ms_tree == NULL);
 
-	while (ss != NULL) {
-		uint64_t offset = P2ROUNDUP(ss->ss_start, align);
-
-		if (offset + size <= ss->ss_end) {
-			*cursor = offset + size;
-			return (offset);
-		}
-		ss = AVL_NEXT(t, ss);
-	}
-
-	/*
-	 * If we know we've searched the whole map (*cursor == 0), give up.
-	 * Otherwise, reset the cursor to the beginning and try again.
-	 */
-	if (*cursor == 0)
-		return (-1ULL);
-
-	*cursor = 0;
-	return (metaslab_block_picker(t, cursor, size, align));
+	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
+	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
 }
 
+/*
+ * Destroy the block allocator specific components.
+ */
 static void
-metaslab_pp_load(space_map_t *sm)
+metaslab_rt_destroy(range_tree_t *rt, void *arg)
 {
-	space_seg_t *ss;
-
-	ASSERT(sm->sm_ppd == NULL);
-	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
+	metaslab_t *msp = arg;
 
-	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
-	avl_create(sm->sm_pp_root, metaslab_segsize_compare,
-	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
+	ASSERT3P(rt->rt_arg, ==, msp);
+	ASSERT3P(msp->ms_tree, ==, rt);
+	ASSERT0(avl_numnodes(&msp->ms_size_tree));
 
-	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
-		avl_add(sm->sm_pp_root, ss);
+	avl_destroy(&msp->ms_size_tree);
 }
 
 static void
-metaslab_pp_unload(space_map_t *sm)
+metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
 {
-	void *cookie = NULL;
-
-	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
-	sm->sm_ppd = NULL;
-
-	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
-		/* tear down the tree */
-	}
+	metaslab_t *msp = arg;
 
-	avl_destroy(sm->sm_pp_root);
-	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
-	sm->sm_pp_root = NULL;
+	ASSERT3P(rt->rt_arg, ==, msp);
+	ASSERT3P(msp->ms_tree, ==, rt);
+	VERIFY(!msp->ms_condensing);
+	avl_add(&msp->ms_size_tree, rs);
 }
 
-/* ARGSUSED */
 static void
-metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
+metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
 {
-	/* No need to update cursor */
+	metaslab_t *msp = arg;
+
+	ASSERT3P(rt->rt_arg, ==, msp);
+	ASSERT3P(msp->ms_tree, ==, rt);
+	VERIFY(!msp->ms_condensing);
+	avl_remove(&msp->ms_size_tree, rs);
 }
 
-/* ARGSUSED */
 static void
-metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
+metaslab_rt_vacate(range_tree_t *rt, void *arg)
 {
-	/* No need to update cursor */
+	metaslab_t *msp = arg;
+
+	ASSERT3P(rt->rt_arg, ==, msp);
+	ASSERT3P(msp->ms_tree, ==, rt);
+
+	/*
+	 * Normally one would walk the tree freeing nodes along the way.
+	 * Since the nodes are shared with the range trees we can avoid
+	 * walking all nodes and just reinitialize the avl tree. The nodes
+	 * will be freed by the range tree, so we don't want to free them here.
+	 */
+	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
+	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
 }
 
+static range_tree_ops_t metaslab_rt_ops = {
+	metaslab_rt_create,
+	metaslab_rt_destroy,
+	metaslab_rt_add,
+	metaslab_rt_remove,
+	metaslab_rt_vacate
+};
+
+/*
+ * ==========================================================================
+ * Metaslab block operations
+ * ==========================================================================
+ */
+
 /*
  * Return the maximum contiguous segment within the metaslab.
  */
 uint64_t
-metaslab_pp_maxsize(space_map_t *sm)
+metaslab_block_maxsize(metaslab_t *msp)
 {
-	avl_tree_t *t = sm->sm_pp_root;
-	space_seg_t *ss;
+	avl_tree_t *t = &msp->ms_size_tree;
+	range_seg_t *rs;
 
-	if (t == NULL || (ss = avl_last(t)) == NULL)
+	if (t == NULL || (rs = avl_last(t)) == NULL)
 		return (0ULL);
 
-	return (ss->ss_end - ss->ss_start);
+	return (rs->rs_end - rs->rs_start);
+}
+
+uint64_t
+metaslab_block_alloc(metaslab_t *msp, uint64_t size)
+{
+	uint64_t start;
+	range_tree_t *rt = msp->ms_tree;
+
+	VERIFY(!msp->ms_condensing);
+
+	start = msp->ms_ops->msop_alloc(msp, size);
+	if (start != -1ULL) {
+		vdev_t *vd = msp->ms_group->mg_vd;
+
+		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
+		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
+		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
+		range_tree_remove(rt, start, size);
+	}
+	return (start);
+}
+
+/*
+ * ==========================================================================
+ * Common allocator routines
+ * ==========================================================================
+ */
+
+/*
+ * This is a helper function that can be used by the allocator to find
+ * a suitable block to allocate. This will search the specified AVL
+ * tree looking for a block that matches the specified criteria.
+ */
+static uint64_t
+metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
+    uint64_t align)
+{
+	range_seg_t *rs, rsearch;
+	avl_index_t where;
+
+	rsearch.rs_start = *cursor;
+	rsearch.rs_end = *cursor + size;
+
+	rs = avl_find(t, &rsearch, &where);
+	if (rs == NULL)
+		rs = avl_nearest(t, where, AVL_AFTER);
+
+	while (rs != NULL) {
+		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
+
+		if (offset + size <= rs->rs_end) {
+			*cursor = offset + size;
+			return (offset);
+		}
+		rs = AVL_NEXT(t, rs);
+	}
+
+	/*
+	 * If we know we've searched the whole map (*cursor == 0), give up.
+	 * Otherwise, reset the cursor to the beginning and try again.
+	 */
+	if (*cursor == 0)
+		return (-1ULL);
+
+	*cursor = 0;
+	return (metaslab_block_picker(t, cursor, size, align));
 }
 
 /*
@@ -647,29 +780,31 @@ metaslab_pp_maxsize(space_map_t *sm)
  * ==========================================================================
  */
 static uint64_t
-metaslab_ff_alloc(space_map_t *sm, uint64_t size)
+metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
 {
-	avl_tree_t *t = &sm->sm_root;
+	/*
+	 * Find the largest power of 2 block size that evenly divides the
+	 * requested size. This is used to try to allocate blocks with similar
+	 * alignment from the same area of the metaslab (i.e. same cursor
+	 * bucket), but it does not guarantee that allocations of other
+	 * sizes will not exist in the same region.
+	 */
 	uint64_t align = size & -size;
-	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
+	uint64_t *cursor = &msp->ms_lbas[highbit(align) - 1];
+	avl_tree_t *t = &msp->ms_tree->rt_root;
 
 	return (metaslab_block_picker(t, cursor, size, align));
 }
 

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
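
The comment added to metaslab_ff_alloc() captures the cursor-bucket trick:
size & -size isolates the lowest set bit of the requested size, i.e. the
largest power of two that evenly divides it, and highbit(align) - 1 selects
a per-alignment-class cursor in ms_lbas[]. A standalone sketch of that
computation (the sample sizes are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* 1-based index of the highest set bit, mirroring illumos highbit() */
    static int
    highbit_sketch(uint64_t v)
    {
            int h = 0;

            while (v != 0) {
                    h++;
                    v >>= 1;
            }
            return (h);
    }

    int
    main(void)
    {
            uint64_t sizes[] = { 512, 4096, 12288, 131072 };

            for (int i = 0; i < 4; i++) {
                    uint64_t align = sizes[i] & -sizes[i];

                    (void) printf("size %7ju -> align %7ju -> bucket %d\n",
                        (uintmax_t)sizes[i], (uintmax_t)align,
                        highbit_sketch(align) - 1);
            }
            return (0);
    }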


