Date:      Thu, 8 Feb 2018 20:24:54 +0000 (UTC)
From:      Alan Somers <asomers@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r329027 - in projects/zfsd/head/tests/sys/cddl/zfs: include tests/zfsd
Message-ID:  <201802082024.w18KOsBQ034658@repo.freebsd.org>

Author: asomers
Date: Thu Feb  8 20:24:54 2018
New Revision: 329027
URL: https://svnweb.freebsd.org/changeset/base/329027

Log:
  WIP removing SAS phy control from the ZFS test suite
  
  Convert most zfsd tests to use libgnop instead of libsas
  
  This will allow those tests to run on systems without SAS
  expanders, and eliminate one source of intermittency in the
  tests.
  
  tests/sys/cddl/zfs/include/libgnop.kshlib
  	* Allow setting the physical path on a gnop device
  	* Fix calculation of disk size
  
  tests/sys/cddl/zfs/tests/zfsd/cleanup.ksh
  	Cleanup gnops on exit
  
  tests/sys/cddl/zfs/tests/zfsd/setup.ksh
  	Don't bother verifying SAS expanders.  They won't be needed for much
  	longer.
  
  tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_001_neg.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_002_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_003_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_004_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_007_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_import_001_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_003_pos.ksh
  tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh
  	Simulate drive pulls by using gnops instead of SAS phy control.
  
  Sponsored by:	Spectra Logic Corp
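  
  All of the converted tests follow one pattern, sketched below with
  hypothetical device names ("phys0" stands in for any physical path
  string; create_gnop and destroy_gnop are the libgnop.kshlib helpers):
  
  	# Simulate a drive pull, then a replacement arriving at the same
  	# physical path.  Device names are illustrative only.
  	create_gnop da1 phys0	# da1.nop appears, advertising phys0
  	create_gnop da2
  	zpool create tank mirror da1.nop da2.nop
  	destroy_gnop da1	# "pull": da1.nop vanishes while still in use
  	create_gnop da3 phys0	# "insert": a new disk at the same path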

Modified:
  projects/zfsd/head/tests/sys/cddl/zfs/include/libgnop.kshlib
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/cleanup.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/setup.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_001_neg.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_002_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_003_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_004_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_007_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_import_001_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_003_pos.ksh
  projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh

Modified: projects/zfsd/head/tests/sys/cddl/zfs/include/libgnop.kshlib
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/include/libgnop.kshlib	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/include/libgnop.kshlib	Thu Feb  8 20:24:54 2018	(r329027)
@@ -39,7 +39,10 @@
 # won't be present on the gnop device and vice versa.
 function create_gnop
 {
+	# Name of disk to use, with or without /dev/
 	typeset disk=$1
+	# Optional physical path to use
+	typeset physpath=$2
 	# size of disk in bytes
 	typeset -li disk_size
 	# disk size, rounded down to multiple of 16384
@@ -48,14 +51,19 @@ function create_gnop
 	typeset -li nop_size
 	# offset of the beginning of the nop device in bytes
 	typeset -li nop_offset
+	typeset args
 
-	disk_size=`diskinfo da0 | cut -f 3`
+	disk_size=`diskinfo $disk | cut -f 3`
 	# Round it down so the nop device will be 4k-aligned
 	disk_size_rounded=$(( ${disk_size} / 16384 * 16384 ))
 	nop_size=$(( ${disk_size_rounded} / 4 ))
 	nop_offset=${nop_size}
+	args="-s ${nop_size} -o ${nop_offset}"
+	if [ -n "$physpath" ]; then
+		args="$args -z $physpath"
+	fi
 
-	gnop create -s ${nop_size} -o ${nop_offset} ${disk}
+	gnop create ${args} ${disk}
 }
 
 # Create multiple gnop devices
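
A worked example of create_gnop's arithmetic, using an illustrative
10 GiB disk (-z is the gnop(8) option that sets the reported physical
path; the invocation below is what the helper ends up running):

	# disk_size         = 10737418240
	# disk_size_rounded = 10737418240 / 16384 * 16384 = 10737418240
	# nop_size          = 10737418240 / 4 = 2684354560
	# nop_offset        = 2684354560
	gnop create -s 2684354560 -o 2684354560 -z phys0 da1

The nop device covers the second quarter of the disk, so it stays
4k-aligned and any ZFS labels written to it can never collide with
labels written to the raw disk.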

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/cleanup.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/cleanup.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/cleanup.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -27,10 +27,11 @@
 # $FreeBSD$
 
 . ${STF_SUITE}/include/libtest.kshlib
+. ${STF_SUITE}/include/libgnop.kshlib
 
-verify_runnable "global"
-
 # Rotate logs now, because this test can generate a great volume of log entries
 newsyslog
 
-default_cleanup
+default_cleanup_noexit
+destroy_gnops ${DISKS}
+log_pass
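
(default_cleanup_noexit is presumably the usual teardown minus the
final exit that default_cleanup performs, which leaves room to run
destroy_gnops before this script issues its own log_pass.)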

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/setup.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/setup.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/setup.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -27,18 +27,6 @@
 # $FreeBSD$
 
 . ${STF_SUITE}/include/libtest.kshlib
-. ${STF_SUITE}/include/libsas.kshlib
-
-verify_runnable "global"
-echo "list of disks: $DISKS"
-
-# Make sure that all of the disks that we've been given are attached to a
-# SAS expander, and that we can find the phy they're attached to.  This
-# function will cause the script to exit if it fails.
-for disk in $DISKS
-do
-	find_verify_sas_disk $disk
-done
 
 # Rotate logs now, because this test can generate a great volume of log entries
 newsyslog

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_001_neg.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_001_neg.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_001_neg.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -28,7 +28,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -37,18 +37,13 @@
 # ID: zfsd_autoreplace_001_neg
 #
 # DESCRIPTION: 
-#	In a pool with the autoreplace property unset, a vdev will not be
+#	In a pool without the autoreplace property set, a vdev will not be
 #	replaced by physical path
 #
 # STRATEGY:
 #	1. Create 1 storage pool without hot spares
-#	2. Remove a vdev by disabling its SAS phy
-#	3. Export the pool
-#	4. Reenable the missing dev's SAS phy
-#	5. Erase the missing dev's ZFS label
-#	6. Disable the missing dev's SAS phy again
-#	7. Import the pool
-#	8. Reenable the missing dev's SAS phy
+#	2. Remove a vdev
+#	3. Create a new vdev with the same physical path as the first one
 #	4. Verify that it does not get added to the pool.
 #
 # TESTABILITY: explicit
@@ -61,16 +56,10 @@
 #
 ###############################################################################
 
-verify_runnable "global"
+log_assert "A pool without the autoreplace property set will not replace disks by physical path"
 
-log_assert "A pool with the autoreplace property set will replace disks by physical path"
-
-
-log_onexit autoreplace_cleanup
-
 function verify_assertion
 {
-	do_autoreplace
 	# 9. Verify that it does not get added to the pool
 	for ((timeout=0; timeout<4; timeout=$timeout+1)); do
 		log_mustnot check_state $TESTPOOL $REMOVAL_DISK "ONLINE"
@@ -78,15 +67,27 @@ function verify_assertion
 	done
 }
 
-
+typeset PHYSPATH="some_physical_path"
 typeset REMOVAL_DISK=$DISK0
-typeset POOLDEVS="$DISK0 $DISK1 $DISK2 $DISK3"
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset NEW_DISK=$DISK4
+typeset NEW_NOP=${DISK4}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset ALLDISKS="${DISK0} ${DISK1} ${DISK2} ${DISK3}"
+typeset ALLNOPS=${ALLDISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
 ensure_zfsd_running
+log_must create_gnops $OTHER_DISKS
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS
-	log_must poolexists "$TESTPOOL"
-	log_must $ZPOOL set autoreplace=off $TESTPOOL
+	log_must create_gnop $REMOVAL_DISK $PHYSPATH
+	log_must create_pool $TESTPOOL $keyword $ALLNOPS
+	log_must $ZPOOL set autoreplace=off $TESTPOOL
+
+	log_must destroy_gnop $REMOVAL_DISK
+	log_must create_gnop $NEW_DISK $PHYSPATH
 	verify_assertion
 	destroy_pool "$TESTPOOL"
+	log_must destroy_gnop $NEW_DISK
 done
+
+log_pass
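
The ALLNOPS assignment above uses ksh93 pattern substitution: ~(E)
switches the pattern to extended-regex syntax, so ".nop" is inserted
before every whitespace run and at the end of the string.  A
standalone demo with made-up names:

	typeset ALLDISKS="da1 da2 da3"
	typeset ALLNOPS=${ALLDISKS//~(E)([[:space:]]+|$)/.nop\1}
	echo "$ALLNOPS"		# prints: da1.nop da2.nop da3.nop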

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_002_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_002_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_002_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -28,7 +28,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -42,13 +42,8 @@
 #
 # STRATEGY:
 #	1. Create 1 storage pool without hot spares
-#	2. Remove a vdev by disabling its SAS phy
-#	3. Export the pool
-#	4. Reenable the missing dev's SAS phy
-#	5. Erase the missing dev's ZFS label
-#	6. Disable the missing dev's SAS phy again
-#	7. Import the pool
-#	8. Reenable the missing dev's SAS phy
+#	2. Remove a vdev
+#	3. Create a new vdev with the same physical path as the first one
 #	4. Verify that it does get added to the pool.
 #
 # TESTABILITY: explicit
@@ -61,27 +56,35 @@
 #
 ###############################################################################
 
-verify_runnable "global"
-
 log_assert "A pool with the autoreplace property will replace disks by physical path"
 
-log_onexit autoreplace_cleanup
-
 function verify_assertion
 {
-	do_autoreplace
-	wait_for_pool_dev_state_change 20 $REMOVAL_DISK ONLINE
+	wait_for_pool_dev_state_change 20 $NEW_DISK ONLINE
 }
 
 
+typeset PHYSPATH="some_physical_path"
 typeset REMOVAL_DISK=$DISK0
-typeset POOLDEVS="$DISK0 $DISK1 $DISK2 $DISK3"
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset NEW_DISK=$DISK4
+typeset NEW_NOP=${DISK4}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset ALLDISKS="${DISK0} ${DISK1} ${DISK2} ${DISK3}"
+typeset ALLNOPS=${ALLDISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
 ensure_zfsd_running
+log_must create_gnops $OTHER_DISKS
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS
-	log_must poolexists "$TESTPOOL"
+	log_must create_gnop $REMOVAL_DISK $PHYSPATH
+	log_must create_pool $TESTPOOL $keyword $ALLNOPS
 	log_must $ZPOOL set autoreplace=on $TESTPOOL
+
+	log_must destroy_gnop $REMOVAL_DISK
+	log_must create_gnop $NEW_DISK $PHYSPATH
 	verify_assertion
 	destroy_pool "$TESTPOOL"
+	log_must destroy_gnop $NEW_DISK
 done
+
+log_pass

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_003_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_003_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_autoreplace_003_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -27,7 +27,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -42,14 +42,9 @@
 #
 # STRATEGY:
 #	1. Create 1 storage pool with a hot spare
-#	2. Remove a vdev by disabling its SAS phy
+#	2. Remove a vdev
 #	3. Wait for the hotspare to fully resilver
-#	4. Export the pool
-#	5. Reenable the missing dev's SAS phy
-#	6. Erase the missing dev's ZFS label
-#	7. Disable the missing dev's SAS phy again
-#	8. Import the pool
-#	9. Reenable the missing dev's SAS phy
+#	4. Create a new vdev with the same physical path as the first one
 #	5. Verify that it does get added to the pool.
 #	6. Verify that the hotspare gets removed.
 #
@@ -63,17 +58,12 @@
 #
 ###############################################################################
 
-verify_runnable "global"
+log_assert "A pool with the autoreplace property will replace disks by physical path, even if a spare is active"
 
-log_assert "A pool with the autoreplace property will replace disks by physical path"
-
-log_onexit autoreplace_cleanup
-
 function verify_assertion
 {
-	do_autoreplace "$SPARE_DISK"
-	# Verify that the original disk gets added to the pool
-	wait_for_pool_dev_state_change 20 $REMOVAL_DISK ONLINE
+	# Verify that the replacement disk gets added to the pool
+	wait_for_pool_dev_state_change 20 $NEW_DISK ONLINE
 
 	# Wait for resilvering to complete
 	wait_until_resilvered
@@ -83,15 +73,28 @@ function verify_assertion
 }
 
 
-typeset SPARE_DISK=$DISK0
-typeset REMOVAL_DISK=$DISK1
-typeset POOLDEVS="$DISK1 $DISK2 $DISK3 $DISK4"
+typeset PHYSPATH="some_physical_path"
+typeset REMOVAL_DISK=$DISK0
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset NEW_DISK=$DISK4
+typeset NEW_NOP=${DISK4}.nop
+typeset SPARE_DISK=${DISK5}
+typeset SPARE_NOP=${DISK5}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset OTHER_NOPS=${OTHER_DISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
 ensure_zfsd_running
+log_must create_gnops $OTHER_DISKS $SPARE_DISK
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS spare $SPARE_DISK
-	log_must poolexists "$TESTPOOL"
+	log_must create_gnop $REMOVAL_DISK $PHYSPATH
+	log_must create_pool $TESTPOOL $keyword $REMOVAL_NOP $OTHER_NOPS spare $SPARE_NOP
 	log_must $ZPOOL set autoreplace=on $TESTPOOL
+
+	log_must destroy_gnop $REMOVAL_DISK
+	log_must create_gnop $NEW_DISK $PHYSPATH
 	verify_assertion
 	destroy_pool "$TESTPOOL"
+	log_must destroy_gnop $NEW_DISK
 done
+
+log_pass
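
The spare lifecycle this test verifies, as it would appear in zpool
status output (abridged and illustrative; the device name is
hypothetical):

	spares
	  da5.nop	INUSE	currently in use	<- after the simulated pull
	...
	spares
	  da5.nop	AVAIL				<- after the new disk resilvers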

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_004_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_004_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_004_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -28,7 +28,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -42,11 +42,10 @@
 #       
 #
 # STRATEGY:
-#	1. Create 1 storage pools with hot spares.  Use disks instead of files
-#	   because they can be removed.
-#	2. Remove one vdev by turning off its SAS phy.
+#	1. Create 1 storage pool with hot spares.
+#	2. Remove one vdev
 #	3. Verify that the spare is in use.
-#	4. Reinsert the vdev by enabling its phy
+#	4. Recreate the vdev
 #	5. Verify that the vdev gets resilvered and the spare gets removed
 #
 # TESTABILITY: explicit
@@ -59,19 +58,12 @@
 #
 ###############################################################################
 
-verify_runnable "global"
-
 log_assert "Removing a disk from a pool results in the spare activating"
 
-log_onexit autoreplace_cleanup
-
-
 function verify_assertion # spare_dev
 {
 	typeset spare_dev=$1
-	find_verify_sas_disk $REMOVAL_DISK
-	log_note "Disabling \"$REMOVAL_DISK\" on expander $EXPANDER phy $PHY"
-	disable_sas_disk $EXPANDER $PHY
+	log_must destroy_gnop $REMOVAL_DISK
 
 	# Check to make sure ZFS sees the disk as removed
 	wait_for_pool_removal 20
@@ -81,11 +73,10 @@ function verify_assertion # spare_dev
 	log_must $ZPOOL status $TESTPOOL
 
 	# Reenable the missing disk
-	log_note "Reenabling phy on expander $EXPANDER phy $PHY"
-	enable_sas_disk $EXPANDER $PHY
+	log_must create_gnop $REMOVAL_DISK $PHYSPATH
 
 	# Check that the disk has rejoined the pool & resilvered
-	wait_for_pool_dev_state_change 20 $REMOVAL_DISK ONLINE
+	wait_for_pool_dev_state_change 20 $REMOVAL_NOP ONLINE
 	wait_until_resilvered
 
 	# Finally, check that the spare deactivated
@@ -93,16 +84,23 @@ function verify_assertion # spare_dev
 }
 
 
+typeset PHYSPATH="some_physical_path"
 typeset REMOVAL_DISK=$DISK0
-typeset SDEV=$DISK4
-typeset POOLDEVS="$DISK0 $DISK1 $DISK2 $DISK3"
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset SPARE_DISK=$DISK4
+typeset SPARE_NOP=${DISK4}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset OTHER_NOPS=${OTHER_DISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
 ensure_zfsd_running
+log_must create_gnops $OTHER_DISKS $SPARE_DISK
+log_must create_gnop $REMOVAL_DISK $PHYSPATH
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS spare $SDEV
-	log_must poolexists "$TESTPOOL"
-	log_must $ZPOOL set autoreplace=on "$TESTPOOL"
-	iterate_over_hotspares verify_assertion $SDEV
+	log_must create_pool $TESTPOOL $keyword $REMOVAL_NOP $OTHER_NOPS spare $SPARE_NOP
+	log_must $ZPOOL set autoreplace=on $TESTPOOL
+	iterate_over_hotspares verify_assertion $SPARE_NOP
 
 	destroy_pool "$TESTPOOL"
 done
+
+log_pass

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_007_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_007_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_hotspare_007_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -28,7 +28,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -42,10 +42,9 @@
 #       
 #
 # STRATEGY:
-#	1. Create 1 storage pools with hot spares.  Use disks instead of files
-#	   because they can be removed.
+#	1. Create 1 storage pool with hot spares.
 #	2. Turn off zfsd
-#	3. Remove one vdev by turning off its SAS phy.
+#	3. Remove one vdev
 #	4. Restart zfsd
 #	5. Verify that the spare is in use.
 #
@@ -69,11 +68,9 @@ log_onexit autoreplace_cleanup
 function verify_assertion # spare_dev
 {
 	typeset spare_dev=$1
-	find_verify_sas_disk $REMOVAL_DISK
 	stop_zfsd
 
-	log_note "Disabling \"$REMOVAL_DISK\" on expander $EXPANDER phy $PHY"
-	disable_sas_disk $EXPANDER $PHY
+	log_must destroy_gnop $REMOVAL_DISK
 
 	# Check to make sure ZFS sees the disk as removed
 	wait_for_pool_removal 20
@@ -84,19 +81,26 @@ function verify_assertion # spare_dev
 	wait_for_pool_dev_state_change 20 $spare_dev INUSE
 
 	# Reenable the missing disk
-	log_note "Reenabling phy on expander $EXPANDER phy $PHY"
-	enable_sas_disk $EXPANDER $PHY
+	log_must create_gnop $REMOVAL_DISK $PHYSPATH
 }
 
+typeset PHYSPATH="some_physical_path"
 typeset REMOVAL_DISK=$DISK0
-typeset SDEV=$DISK4
-typeset POOLDEVS="$DISK0 $DISK1 $DISK2 $DISK3"
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset SPARE_DISK=$DISK4
+typeset SPARE_NOP=${DISK4}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset OTHER_NOPS=${OTHER_DISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
 ensure_zfsd_running
+log_must create_gnops $OTHER_DISKS $SPARE_DISK
+log_must create_gnop $REMOVAL_DISK $PHYSPATH
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS spare $SDEV
-	log_must poolexists "$TESTPOOL"
-	iterate_over_hotspares verify_assertion $SDEV
+	log_must create_pool $TESTPOOL $keyword $REMOVAL_NOP $OTHER_NOPS spare $SPARE_NOP
+	log_must $ZPOOL set autoreplace=on $TESTPOOL
+	iterate_over_hotspares verify_assertion $SPARE_NOP
 
 	destroy_pool "$TESTPOOL"
 done
+
+log_pass

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_import_001_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_import_001_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_import_001_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -28,7 +28,7 @@
 #
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 
 ################################################################################
 #
@@ -45,13 +45,14 @@
 #       
 #
 # STRATEGY:
-#	1. Create 1 storage pools with hot spares.  Use disks instead of files
-#	   because they can be removed.
-#	2. Remove one disk by turning off its SAS phy.
+#	1. Create 1 storage pool with hot spares.
+#	2. Remove one disk
 #	3. Verify that the spare is in use.
-#	4. Reinsert the vdev by enabling its phy
-#	5. Verify that the vdev gets resilvered and the spare gets removed
-#	6. Use additional zpool history data to verify that the pool
+#	4. Export the pool
+#	5. Recreate the vdev
+#	6. Import the pool
+#	7. Verify that the vdev gets resilvered and the spare gets removed
+#	8. Use additional zpool history data to verify that the pool
 #	   finished resilvering _before_ zfsd detached the spare.
 #
 # TESTABILITY: explicit
@@ -69,9 +70,7 @@ verify_runnable "global"
 function verify_assertion # spare_dev
 {
 	typeset spare_dev=$1
-	find_verify_sas_disk $REMOVAL_DISK
-	log_note "Disabling \"$REMOVAL_DISK\" on expander $EXPANDER phy $PHY"
-	disable_sas_disk $EXPANDER $PHY
+	log_must destroy_gnop $REMOVAL_DISK
 
 	# Check to make sure ZFS sees the disk as removed
 	wait_for_pool_removal 20
@@ -84,8 +83,7 @@ function verify_assertion # spare_dev
 	log_must $ZPOOL export $TESTPOOL
 
 	# Reenable the missing disk
-	log_note "Reenabling phy on expander $EXPANDER phy $PHY"
-	enable_sas_disk $EXPANDER $PHY
+	log_must create_gnop $REMOVAL_DISK
 
 	# Import the pool
 	log_must $ZPOOL import $TESTPOOL
@@ -129,26 +127,24 @@ function verify_assertion # spare_dev
 }
 
 
-if ! $(is_physical_device $DISKS) ; then
-	log_unsupported "This directory cannot be run on raw files."
-fi
-
 log_assert "If a removed drive gets reinserted while the pool is exported, \
 	    it will replace its spare when reinserted."
 
-log_onexit autoreplace_cleanup
-
 ensure_zfsd_running
-set_devs
 
 typeset REMOVAL_DISK=$DISK0
-typeset SDEV=$DISK4
-typeset POOLDEVS="$DISK0 $DISK1 $DISK2 $DISK3"
+typeset REMOVAL_NOP=${DISK0}.nop
+typeset SPARE_DISK=$DISK4
+typeset SPARE_NOP=${DISK4}.nop
+typeset OTHER_DISKS="${DISK1} ${DISK2} ${DISK3}"
+typeset OTHER_NOPS=${OTHER_DISKS//~(E)([[:space:]]+|$)/.nop\1}
 set -A MY_KEYWORDS "mirror" "raidz1" "raidz2"
+log_must create_gnops $REMOVAL_DISK $OTHER_DISKS $SPARE_DISK
 for keyword in "${MY_KEYWORDS[@]}" ; do
-	log_must create_pool $TESTPOOL $keyword $POOLDEVS spare $SDEV
-	log_must poolexists "$TESTPOOL"
-	iterate_over_hotspares verify_assertion $SDEV
-
+	log_must create_pool $TESTPOOL $keyword $REMOVAL_NOP $OTHER_NOPS spare $SPARE_NOP
+	verify_assertion $SPARE_NOP
 	destroy_pool "$TESTPOOL"
 done
+
+log_pass
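
Condensed, the new scenario is (hypothetical names, with "tank"
standing in for $TESTPOOL):

	destroy_gnop da1	# zfsd activates the spare
	zpool export tank
	create_gnop da1		# the disk returns while the pool is exported
	zpool import tank	# zfsd resilvers da1.nop, then detaches the spare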

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_003_pos.ksh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_003_pos.ksh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_replace_003_pos.ksh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -33,103 +33,73 @@
 # $FreeBSD$
 
 . $STF_SUITE/include/libtest.kshlib
-. $STF_SUITE/include/libsas.kshlib
+. $STF_SUITE/include/libgnop.kshlib
 . $STF_SUITE/tests/hotspare/hotspare.kshlib
 . $STF_SUITE/tests/zfsd/zfsd.kshlib
 
-verify_runnable "global"
-
 function cleanup
 {
-	# See if the phy has been disabled, and try to re-enable it if possible.
-	[ -n "$EXPANDER0" -a -n "$PHY0" ] && enable_sas_disk $EXPANDER0 $PHY0
-	[ -n "$EXPANDER1" -a -n "$PHY1" ] && enable_sas_disk $EXPANDER1 $PHY1
-	[ -n "$EXPANDER" -a -n "$PHY" ] && enable_sas_disk $EXPANDER $PHY
-
 	destroy_pool $TESTPOOL
 	[[ -e $TESTDIR ]] && log_must $RM -rf $TESTDIR/*
+	for md in $MD0 $MD1 $MD2 $MD3; do
+		gnop destroy -f ${md}.nop
+		for ((i=0; i<5; i=i+1)); do
+			$MDCONFIG -d -u $md && break
+			$SLEEP 1
+		done
+	done
 }
 
-# arg1: disk devname
-# Leaves EXPANDER and PHY set appropriately
-function remove_disk
-{
-	typeset DISK=$1
-	# Find the first disk, get the expander and phy
-	log_note "Looking for expander and phy information for $DISK"
-	find_verify_sas_disk $DISK
-
-	log_note "Disabling \"$DISK\" on expander $EXPANDER phy $PHY"
-	# Disable the first disk.
-	disable_sas_disk $EXPANDER $PHY
-
-	# Check to make sure ZFS sees the disk as removed
-	wait_for_pool_dev_state_change 20 $DISK "REMOVED|UNAVAIL"
-}
-
-# arg1: disk's old devname
-# arg2: disk's expander's devname
-# arg3: disk's phy number
-# arg4: whether the devname must differ after reconnecting
-function reconnect_disk
-{
-	typeset DISK=$1
-	typeset EXPANDER=$2
-	typeset PHY=$3
-
-	# Re-enable the disk, we don't want to leave it turned off
-	log_note "Re-enabling phy $PHY on expander $EXPANDER"
-	enable_sas_disk $EXPANDER $PHY
-
-	log_note "Checking to see whether disk has reappeared"
-
-	prev_disk=$(find_disks $DISK)
-	cur_disk=$(find_disks $FOUNDDISK)
-
-	# If you get this, the test must be fixed to guarantee that
-	# it will reappear with a different name.
-	[ "${prev_disk}" = "${cur_disk}" ] && log_unsupported \
-		"Disk $DISK reappeared with the same devname."
-
-	#Disk should have auto-joined the zpool. Verify it's status is online.
-	wait_for_pool_dev_state_change 20 $FOUNDDISK ONLINE
-}
-
 log_assert "ZFSD will correctly replace disks that disappear and reappear \
 	   with different devnames"
 
 # Outline
+# Use gnop on top of file-backed md devices
+# * file-backed md devices so we can destroy them and recreate them with
+#   different devnames
+# * gnop so we can destroy them while still in use
 # Create a double-parity pool
-# Remove two disks by disabling their SAS phys
-# Reenable the phys in the opposite order
-# Check that the disks's devnames have swapped
+# Remove two vdevs
+# Destroy the md devices and recreate in the opposite order
+# Check that the md's devnames have swapped
 # Verify that the pool regains its health
 
 log_onexit cleanup
 ensure_zfsd_running
 
-child_pids=""
 
-set -A DISKS_ARRAY $DISKS
-typeset DISK0=${DISKS_ARRAY[0]}
-typeset DISK1=${DISKS_ARRAY[1]}
-if [ ${DISK0##/dev/da} -gt ${DISK1##/dev/da} ]; then
-	# Swap disks so we'll disable the lowest numbered first
-	typeset TMP="$DISK1"
-	DISK1="$DISK0"
-	DISK0="$TMP"
-fi
+N_DEVARRAY_FILES=4
+set_devs
+typeset FILE0="${devarray[0]}"
+typeset FILE1="${devarray[1]}"
+typeset FILE2="${devarray[2]}"
+typeset FILE3="${devarray[3]}"
+# Assign separately from typeset so a failed $MDCONFIG isn't masked
+typeset MD0 MD1 MD2 MD3
+MD0=`$MDCONFIG -a -t vnode -f ${FILE0}` || atf_fail "Failed to create md device"
+MD1=`$MDCONFIG -a -t vnode -f ${FILE1}` || atf_fail "Failed to create md device"
+MD2=`$MDCONFIG -a -t vnode -f ${FILE2}` || atf_fail "Failed to create md device"
+MD3=`$MDCONFIG -a -t vnode -f ${FILE3}` || atf_fail "Failed to create md device"
+log_must create_gnops $MD0 $MD1 $MD2 $MD3
 
 for type in "raidz2" "mirror"; do
 	# Create a pool on the supplied disks
-	create_pool $TESTPOOL $type $DISKS
+	create_pool $TESTPOOL $type ${MD0}.nop ${MD1}.nop ${MD2}.nop ${MD3}.nop
 
-	remove_disk $DISK0
-	typeset EXPANDER0=$EXPANDER
-	typeset PHY0=$PHY
-	remove_disk $DISK1
-	typeset EXPANDER1=$EXPANDER
-	typeset PHY1=$PHY
+	log_must destroy_gnop $MD0
+	for ((i=0; i<5; i=i+1)); do
+		$MDCONFIG -d -u $MD0 && break
+		$SLEEP 1
+	done
+	[ -c /dev/$MD0 ] && atf_fail "failed to destroy $MD0"
+	log_must destroy_gnop $MD1
+	for ((i=0; i<5; i=i+1)); do
+		$MDCONFIG -d -u $MD1 && break
+		$SLEEP 1
+	done
+	[ -c /dev/$MD1 ] && atf_fail "failed to destroy $MD1"
 
 	# Make sure that the pool is degraded
 	$ZPOOL status $TESTPOOL |grep "state:" |grep DEGRADED > /dev/null
@@ -137,11 +107,15 @@ for type in "raidz2" "mirror"; do
 		log_fail "Pool $TESTPOOL not listed as DEGRADED"
 	fi
 
-	reconnect_disk $DISK1 $EXPANDER1 $PHY1
-	reconnect_disk $DISK0 $EXPANDER0 $PHY0
+	# Recreate the vdevs in the opposite order
+	MD0=`$MDCONFIG -a -t vnode -f ${FILE1}` || atf_fail "Failed to create md device"
+	MD1=`$MDCONFIG -a -t vnode -f ${FILE0}` || atf_fail "Failed to create md device"
+	log_must create_gnops $MD0 $MD1
+
 	wait_until_resilvered
 	destroy_pool $TESTPOOL
-	log_must $RM -rf /$TESTPOOL
 done
 
 log_pass
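
The md-plus-gnop stacking, reduced to its essentials (file path and
size are illustrative):

	truncate -s 128m /tmp/vdev0
	md=`mdconfig -a -t vnode -f /tmp/vdev0`	# prints e.g. md0
	gnop create $md				# exposes /dev/md0.nop
	# ... build a pool on md0.nop; later the stack can be torn down
	# even while ZFS still holds the vdev open:
	gnop destroy -f ${md}.nop
	mdconfig -d -u $md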

Modified: projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh
==============================================================================
--- projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh	Thu Feb  8 20:22:59 2018	(r329026)
+++ projects/zfsd/head/tests/sys/cddl/zfs/tests/zfsd/zfsd_test.sh	Thu Feb  8 20:24:54 2018	(r329027)
@@ -204,7 +204,7 @@ atf_test_case zfsd_hotspare_004_pos cleanup
 zfsd_hotspare_004_pos_head()
 {
 	atf_set "descr" "Removing a disk from a pool results in the spare activating"
-	atf_set "require.progs"  zpool camcontrol zfsd
+	atf_set "require.progs"  gnop zpool camcontrol zfsd
 	atf_set "timeout" 3600
 }
 zfsd_hotspare_004_pos_body()
@@ -227,7 +227,7 @@ zfsd_hotspare_004_pos_cleanup()
 	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
 	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	ksh93 $(atf_get_srcdir)/hotspare_cleanup.ksh || atf_fail "Cleanup failed"
+	ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
 }
 
 atf_test_case zfsd_hotspare_005_pos cleanup
@@ -292,7 +292,7 @@ atf_test_case zfsd_hotspare_007_pos cleanup
 zfsd_hotspare_007_pos_head()
 {
 	atf_set "descr" "zfsd will swap failed drives at startup"
-	atf_set "require.progs"  zpool camcontrol zfsd
+	atf_set "require.progs"  gnop zpool camcontrol zfsd
 	atf_set "timeout" 3600
 }
 zfsd_hotspare_007_pos_body()
@@ -315,7 +315,7 @@ zfsd_hotspare_007_pos_cleanup()
 	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
 	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	ksh93 $(atf_get_srcdir)/hotspare_cleanup.ksh || atf_fail "Cleanup failed"
+	ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
 }
 
 atf_test_case zfsd_hotspare_008_neg cleanup
@@ -375,14 +375,14 @@ zfsd_autoreplace_001_neg_cleanup()
 	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
 	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	ksh93 $(atf_get_srcdir)/hotspare_cleanup.ksh || atf_fail "Cleanup failed"
+	ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
 }
 
 atf_test_case zfsd_autoreplace_002_pos cleanup
 zfsd_autoreplace_002_pos_head()
 {
 	atf_set "descr" "A pool with autoreplace set will replace by physical path"
-	atf_set "require.progs"  zpool camcontrol zfsd
+	atf_set "require.progs"  gnop zpool zfsd
 	atf_set "timeout" 3600
 }
 zfsd_autoreplace_002_pos_body()
@@ -405,7 +405,7 @@ zfsd_autoreplace_002_pos_cleanup()
 	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
 	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	ksh93 $(atf_get_srcdir)/hotspare_cleanup.ksh || atf_fail "Cleanup failed"
+	ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
 }
 
 atf_test_case zfsd_autoreplace_003_pos cleanup
@@ -435,7 +435,7 @@ zfsd_autoreplace_003_pos_cleanup()
 	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
 	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	ksh93 $(atf_get_srcdir)/hotspare_cleanup.ksh || atf_fail "Cleanup failed"
+	ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
 }
 
 atf_test_case zfsd_replace_001_pos cleanup
@@ -503,9 +503,9 @@ zfsd_replace_003_pos_head()
 zfsd_replace_003_pos_body()
 {
 	. $(atf_get_srcdir)/../../include/default.cfg
-	. $(atf_get_srcdir)/zfsd.cfg
+	. $(atf_get_srcdir)/../hotspare/hotspare.kshlib
+	. $(atf_get_srcdir)/../hotspare/hotspare.cfg
 
-	verify_disk_count "$DISKS" 3
 	ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
 	ksh93 $(atf_get_srcdir)/zfsd_replace_003_pos.ksh
 	if [[ $? != 0 ]]; then
@@ -525,7 +525,7 @@ atf_test_case zfsd_import_001_pos cleanup
 zfsd_import_001_pos_head()
 {
 	atf_set "descr" "If a removed drive gets reinserted while the pool is exported, it will detach its spare when imported."
-	atf_set "require.progs"  zfsd zpool
+	atf_set "require.progs"  gnop zfsd zpool
 	atf_set "timeout" 3600
 }
 zfsd_import_001_pos_body()


