Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 23 Mar 2011 15:43:31 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r219910 - projects/graid/head/sys/geom/raid
Message-ID:  <201103231543.p2NFhV4R099056@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: mav
Date: Wed Mar 23 15:43:31 2011
New Revision: 219910
URL: http://svn.freebsd.org/changeset/base/219910

Log:
  Since NVidia metadata have no generation numbers, bump the volume ID each
  time the array is started incomplete after a timeout or when one of the disks
  was hot-disconnected. If the lost disk reappears later, it won't corrupt data,
  but will be reported as a separate array. The Windows driver does the same.
  
  Since the volume ID is no longer persistent (in addition to being too long)
  and so not very suitable for management, use sequential numbering for
  generating GEOM node names.

Modified:
  projects/graid/head/sys/geom/raid/md_nvidia.c

Modified: projects/graid/head/sys/geom/raid/md_nvidia.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_nvidia.c	Wed Mar 23 15:22:59 2011	(r219909)
+++ projects/graid/head/sys/geom/raid/md_nvidia.c	Wed Mar 23 15:43:31 2011	(r219910)
@@ -144,6 +144,8 @@ static struct g_raid_md_class g_raid_md_
 	.mdc_priority = 100
 };
 
+static int NVidiaNodeID = 1;
+
 static void
 g_raid_md_nvidia_print(struct nvidia_raid_conf *meta)
 {
@@ -420,7 +422,7 @@ g_raid_md_nvidia_start_disk(struct g_rai
 	/* Find disk position in metadata by it's serial. */
 	if (pd->pd_meta != NULL) {
 		disk_pos = pd->pd_meta->disk_number;
-		if (disk_pos >= meta->total_disks)
+		if (disk_pos >= meta->total_disks || mdi->mdio_started)
 			disk_pos = -3;
 	} else
 		disk_pos = -3;
@@ -518,13 +520,6 @@ nofit:
 			/* New or ex-spare disk. */
 			g_raid_change_subdisk_state(sd,
 			    G_RAID_SUBDISK_S_NEW);
-		} else if (mdi->mdio_started) {
-			/*
-			 * As soon as we have no generations --
-			 * treat every hot-plugged disk as new.
-			 */
-			g_raid_change_subdisk_state(sd,
-			    G_RAID_SUBDISK_S_NEW);
 		} else if (meta->state == NVIDIA_S_REBUILD &&
 		    (pd->pd_meta->disk_status & 0x100)) {
 			/* Rebuilding disk. */
@@ -793,8 +788,8 @@ g_raid_md_create_nvidia(struct g_raid_md
 
 	mdi = (struct g_raid_md_nvidia_object *)md;
 	arc4rand(&mdi->mdio_volume_id, 16, 0);
-	snprintf(name, sizeof(name), "NVidia-%08x",
-	    (uint32_t)mdi->mdio_volume_id[0]);
+	snprintf(name, sizeof(name), "NVidia-%d",
+	    atomic_fetchadd_int(&NVidiaNodeID, 1));
 	sc = g_raid_create_node(mp, name, md);
 	if (sc == NULL)
 		return (G_RAID_MD_TASTE_FAIL);
@@ -900,8 +895,8 @@ search:
 	} else { /* Not found matching node -- create one. */
 		result = G_RAID_MD_TASTE_NEW;
 		memcpy(&mdi->mdio_volume_id, &meta->volume_id, 16);
-		snprintf(name, sizeof(name), "NVidia-%08x",
-		    (uint32_t)mdi->mdio_volume_id[0]);
+		snprintf(name, sizeof(name), "NVidia-%d",
+		    atomic_fetchadd_int(&NVidiaNodeID, 1));
 		sc = g_raid_create_node(mp, name, md);
 		md->mdo_softc = sc;
 		geom = sc->sc_geom;
@@ -967,8 +962,11 @@ g_raid_md_event_nvidia(struct g_raid_md_
 	if (disk == NULL) {
 		switch (event) {
 		case G_RAID_NODE_E_START:
-			if (!mdi->mdio_started)
+			if (!mdi->mdio_started) {
+				/* Bump volume ID to drop missing disks. */
+				arc4rand(&mdi->mdio_volume_id, 16, 0);
 				g_raid_md_nvidia_start(sc);
+			}
 			return (0);
 		}
 		return (-1);
@@ -995,8 +993,14 @@ g_raid_md_event_nvidia(struct g_raid_md_
 			g_raid_destroy_disk(disk);
 		}
 
-		/* Write updated metadata to all disks. */
-		g_raid_md_write_nvidia(md, NULL, NULL, NULL);
+		if (mdi->mdio_started) {
+			/* Bump volume ID to prevent disk resurrection. */
+			if (pd->pd_disk_pos >= 0)
+				arc4rand(&mdi->mdio_volume_id, 16, 0);
+
+			/* Write updated metadata to all disks. */
+			g_raid_md_write_nvidia(md, NULL, NULL, NULL);
+		}
 
 		/* Check if anything left except placeholders. */
 		if (g_raid_ndisks(sc, -1) ==



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201103231543.p2NFhV4R099056>