Date:      Mon, 17 Jan 2011 18:58:28 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r217507 - in projects/graid/head: sbin/geom/class/raid sys/geom/raid
Message-ID:  <201101171858.p0HIwSl5009753@svn.freebsd.org>

Author: mav
Date: Mon Jan 17 18:58:28 2011
New Revision: 217507
URL: http://svn.freebsd.org/changeset/base/217507

Log:
  Add FAILED disk and subdisk states. A disk falls into this state after
  some number of fatal I/O errors, after which I/O to it should be
  avoided. RAID0, however, may still try to use such a disk when there is
  no other choice. A disk in this state is a candidate for replacement
  with a spare, if one is present. Another option would have been to
  simply drop the failed disk from the array, but the implemented
  behavior more closely matches what the Intel MatrixRAID BIOS does.
  
  Add a method that lets transformation modules mark disks as failed.

Modified:
  projects/graid/head/sbin/geom/class/raid/geom_raid.c
  projects/graid/head/sys/geom/raid/g_raid.c
  projects/graid/head/sys/geom/raid/g_raid.h
  projects/graid/head/sys/geom/raid/g_raid_md_if.m
  projects/graid/head/sys/geom/raid/md_intel.c
  projects/graid/head/sys/geom/raid/tr_raid0.c
  projects/graid/head/sys/geom/raid/tr_raid1.c
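
A minimal sketch of how a transformation (TR) module is expected to use the
new hook follows.  Only g_raid_fail_disk() and the FAILED disk and subdisk
states come from this change; the sd_disk back-pointer, the d_errors counter
and the threshold are placeholders for illustration.

	/* Hypothetical I/O completion path in a TR module. */
	static u_int g_raid_example_fail_threshold = 5;	/* made-up knob */

	static void
	g_raid_tr_example_iodone(struct g_raid_softc *sc,
	    struct g_raid_subdisk *sd, int error)
	{

		if (error == 0)
			return;
		/* d_errors is an assumed per-disk fatal error counter. */
		if (++sd->sd_disk->d_errors < g_raid_example_fail_threshold)
			return;
		/*
		 * Hand the disk to the metadata module via the new hook;
		 * it records the failure in on-disk metadata and moves the
		 * disk and its subdisks to the FAILED states, after which
		 * the TR module avoids further I/O to it (RAID0 excepted,
		 * when there is no other source for the data).
		 */
		g_raid_fail_disk(sc, sd, sd->sd_disk);
	}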

Modified: projects/graid/head/sbin/geom/class/raid/geom_raid.c
==============================================================================
--- projects/graid/head/sbin/geom/class/raid/geom_raid.c	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sbin/geom/class/raid/geom_raid.c	Mon Jan 17 18:58:28 2011	(r217507)
@@ -65,6 +65,9 @@ struct g_command class_commands[] = {
 	{ "remove", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
 	    "[-v] name prov"
 	},
+	{ "fail", G_FLAG_VERBOSE, NULL, G_NULL_OPTS,
+	    "[-v] name prov"
+	},
 	{ "stop", G_FLAG_VERBOSE, NULL,
 	    {
 		{ 'f', "force", NULL, G_TYPE_BOOL },
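
The table above wires the new "fail" verb into the geom(8) userland front
end, so after this change a disk can be marked failed from the command line,
roughly as follows (array and provider names are placeholders, and the usual
per-class shorthand link would presumably be "graid"):

	geom raid fail -v MyArray ada1

The request ends up in the new "fail" branch of g_raid_md_ctl_intel()
further down in this commit.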

Modified: projects/graid/head/sys/geom/raid/g_raid.c
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.c	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/g_raid.c	Mon Jan 17 18:58:28 2011	(r217507)
@@ -51,6 +51,10 @@ static MALLOC_DEFINE(M_RAID, "raid_data"
 
 SYSCTL_DECL(_kern_geom);
 SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
+u_int g_raid_aggressive_spare = 0;
+TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
+SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
+    &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
 u_int g_raid_debug = 2;
 TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
 SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
@@ -122,14 +126,18 @@ g_raid_disk_state2str(int state)
 	switch (state) {
 	case G_RAID_DISK_S_NONE:
 		return ("NONE");
-	case G_RAID_DISK_S_ACTIVE:
-		return ("ACTIVE");
-	case G_RAID_DISK_S_SPARE:
-		return ("SPARE");
 	case G_RAID_DISK_S_OFFLINE:
 		return ("OFFLINE");
+	case G_RAID_DISK_S_FAILED:
+		return ("FAILED");
+	case G_RAID_DISK_S_STALE_FAILED:
+		return ("STALE_FAILED");
+	case G_RAID_DISK_S_SPARE:
+		return ("SPARE");
 	case G_RAID_DISK_S_STALE:
 		return ("STALE");
+	case G_RAID_DISK_S_ACTIVE:
+		return ("ACTIVE");
 	default:
 		return ("INVALID");
 	}
@@ -154,6 +162,8 @@ g_raid_subdisk_state2str(int state)
 	switch (state) {
 	case G_RAID_SUBDISK_S_NONE:
 		return ("NONE");
+	case G_RAID_SUBDISK_S_FAILED:
+		return ("FAILED");
 	case G_RAID_SUBDISK_S_NEW:
 		return ("NEW");
 	case G_RAID_SUBDISK_S_STALE:
@@ -1700,6 +1710,14 @@ void g_raid_write_metadata(struct g_raid
 		G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
 }
 
+void g_raid_fail_disk(struct g_raid_softc *sc,
+    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
+{
+
+	if (sc->sc_md)
+		G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
+}
+
 static void
 g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
     struct g_consumer *cp, struct g_provider *pp)
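
The aggressive_spare knob added at the top of this file is both a loader
tunable and a writable sysctl, so it can be set at boot or at run time, for
example:

	# /boot/loader.conf
	kern.geom.raid.aggressive_spare=1

	# or, once the system is up
	sysctl kern.geom.raid.aggressive_spare=1

As its description says, it lets the class treat disks without recognized
metadata as spares; combined with the new FAILED state, such a spare is a
natural replacement candidate for a failed slot.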

Modified: projects/graid/head/sys/geom/raid/g_raid.h
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid.h	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/g_raid.h	Mon Jan 17 18:58:28 2011	(r217507)
@@ -48,6 +48,7 @@ struct g_raid_tr_object;
 					 G_RAID_DEVICE_FLAG_NOFAILSYNC)
 
 #ifdef _KERNEL
+extern u_int g_raid_aggressive_spare;
 extern u_int g_raid_debug;
 extern u_int g_raid_start_timeout;
 
@@ -98,12 +99,13 @@ struct g_raid_event {
 	int			 e_error;
 	TAILQ_ENTRY(g_raid_event) e_next;
 };
-#define G_RAID_DISK_S_NONE		0x00
-#define G_RAID_DISK_S_ACTIVE		0x01
-#define G_RAID_DISK_S_SPARE		0x02
-#define G_RAID_DISK_S_OFFLINE		0x03
-#define G_RAID_DISK_S_STALE		0x04
-#define G_RAID_DISK_S_FAILED		0x05
+#define G_RAID_DISK_S_NONE		0x00	/* State is unknown. */
+#define G_RAID_DISK_S_OFFLINE		0x01	/* Missing disk placeholder. */
+#define G_RAID_DISK_S_FAILED		0x02	/* Failed. */
+#define G_RAID_DISK_S_STALE_FAILED	0x03	/* Old failed. */
+#define G_RAID_DISK_S_SPARE		0x04	/* Hot-spare. */
+#define G_RAID_DISK_S_STALE		0x05	/* Old disk, unused now. */
+#define G_RAID_DISK_S_ACTIVE		0x06	/* Operational. */
 
 #define G_RAID_DISK_E_DISCONNECTED	0x01
 
@@ -120,14 +122,16 @@ struct g_raid_disk {
 };
 
 #define G_RAID_SUBDISK_S_NONE		0x00	/* Absent. */
-#define G_RAID_SUBDISK_S_NEW		0x01	/* Blank. */
-#define G_RAID_SUBDISK_S_STALE		0x02	/* Dirty. */
-#define G_RAID_SUBDISK_S_REBUILD	0x03	/* Blank + rebuild. */
-#define G_RAID_SUBDISK_S_RESYNC		0x04	/* Dirty + check/repair. */
-#define G_RAID_SUBDISK_S_ACTIVE		0x05	/* Usable. */
+#define G_RAID_SUBDISK_S_FAILED		0x01	/* Failed. */
+#define G_RAID_SUBDISK_S_NEW		0x02	/* Blank. */
+#define G_RAID_SUBDISK_S_STALE		0x03	/* Dirty. */
+#define G_RAID_SUBDISK_S_REBUILD	0x04	/* Blank + rebuild. */
+#define G_RAID_SUBDISK_S_RESYNC		0x05	/* Dirty + check/repair. */
+#define G_RAID_SUBDISK_S_ACTIVE		0x06	/* Usable. */
 
 #define G_RAID_SUBDISK_E_NEW		0x01
-#define G_RAID_SUBDISK_E_DISCONNECTED	0x02
+#define G_RAID_SUBDISK_E_FAILED		0x02
+#define G_RAID_SUBDISK_E_DISCONNECTED	0x03
 
 struct g_raid_subdisk {
 	struct g_raid_softc	*sd_softc;	/* Back-pointer to softc. */
@@ -310,6 +314,8 @@ void g_raid_change_volume_state(struct g
 
 void g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
     struct g_raid_subdisk *sd, struct g_raid_disk *disk);
+void g_raid_fail_disk(struct g_raid_softc *sc,
+    struct g_raid_subdisk *sd, struct g_raid_disk *disk);
 
 u_int g_raid_ndisks(struct g_raid_softc *sc, int state);
 u_int g_raid_nsubdisks(struct g_raid_volume *vol, int state);
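
With the reordered state constants and the counting helpers above, code in
the class can reason about degraded arrays in a few lines.  A hypothetical
check for whether a hot-spare should be activated might look like this
(only g_raid_ndisks() and the state constants are real; the function itself
is illustrative):

	static int
	g_raid_example_needs_spare(struct g_raid_softc *sc)
	{

		/*
		 * A FAILED or OFFLINE slot is a replacement candidate,
		 * but only if a hot-spare is actually present.
		 */
		return ((g_raid_ndisks(sc, G_RAID_DISK_S_FAILED) > 0 ||
		    g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE) > 0) &&
		    g_raid_ndisks(sc, G_RAID_DISK_S_SPARE) > 0);
	}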

Modified: projects/graid/head/sys/geom/raid/g_raid_md_if.m
==============================================================================
--- projects/graid/head/sys/geom/raid/g_raid_md_if.m	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/g_raid_md_if.m	Mon Jan 17 18:58:28 2011	(r217507)
@@ -100,6 +100,13 @@ METHOD int write {
 	struct g_raid_disk *disk;
 };
 
+# fail_disk() - mark disk as failed and remove it from use.
+METHOD int fail_disk {
+	struct g_raid_md_object *md;
+	struct g_raid_subdisk *sd;
+	struct g_raid_disk *disk;
+};
+
 # free_disk() - disk destructor.
 METHOD int free_disk {
 	struct g_raid_md_object *md;
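
g_raid_md_if.m is a kobj(9) interface description: at build time it is
expanded into g_raid_md_if.h and g_raid_md_if.c, which provide the
G_RAID_MD_FAIL_DISK() dispatch macro called by g_raid_fail_disk() in
g_raid.c above.  A metadata module opts in by listing an implementation in
its method table, as the Intel module does below:

	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_intel),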

Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/md_intel.c	Mon Jan 17 18:58:28 2011	(r217507)
@@ -106,10 +106,10 @@ struct intel_raid_disk {
 	uint32_t	sectors;
 	uint32_t	id;
 	uint32_t	flags;
-#define INTEL_F_SPARE           0x01
-#define INTEL_F_ASSIGNED        0x02
-#define INTEL_F_DOWN            0x04
-#define INTEL_F_ONLINE          0x08
+#define INTEL_F_SPARE		0x01
+#define INTEL_F_ASSIGNED	0x02
+#define INTEL_F_FAILED		0x04
+#define INTEL_F_ONLINE		0x08
 
 	uint32_t	filler[5];
 } __packed;
@@ -182,6 +182,7 @@ static g_raid_md_taste_t g_raid_md_taste
 static g_raid_md_event_t g_raid_md_event_intel;
 static g_raid_md_ctl_t g_raid_md_ctl_intel;
 static g_raid_md_write_t g_raid_md_write_intel;
+static g_raid_md_fail_disk_t g_raid_md_fail_disk_intel;
 static g_raid_md_free_disk_t g_raid_md_free_disk_intel;
 static g_raid_md_free_t g_raid_md_free_intel;
 
@@ -191,6 +192,7 @@ static kobj_method_t g_raid_md_intel_met
 	KOBJMETHOD(g_raid_md_event,	g_raid_md_event_intel),
 	KOBJMETHOD(g_raid_md_ctl,	g_raid_md_ctl_intel),
 	KOBJMETHOD(g_raid_md_write,	g_raid_md_write_intel),
+	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_intel),
 	KOBJMETHOD(g_raid_md_free_disk,	g_raid_md_free_disk_intel),
 	KOBJMETHOD(g_raid_md_free,	g_raid_md_free_intel),
 	{ 0, 0 }
@@ -243,6 +245,9 @@ g_raid_md_intel_print(struct intel_raid_
 	struct intel_raid_map *mmap;
 	int i, j, k;
 
+	if (g_raid_debug < 1)
+		return;
+
 	printf("********* ATA Intel MatrixRAID Metadata *********\n");
 	printf("intel_id            <%.24s>\n", meta->intel_id);
 	printf("version             <%.6s>\n", meta->version);
@@ -496,7 +501,10 @@ g_raid_md_intel_start_disk(struct g_raid
 	disk_pos = intel_meta_find_disk(meta, pd->pd_disk_meta.serial);
 	if (disk_pos < 0) {
 		G_RAID_DEBUG(1, "Unknown, probably stale disk");
-		g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE);
+		if (pd->pd_disk_meta.flags & INTEL_F_FAILED)
+			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE);
+		else
+			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
 		return;
 	}
 
@@ -520,7 +528,10 @@ g_raid_md_intel_start_disk(struct g_raid
 	disk = olddisk;
 
 	/* Welcome the "new" disk. */
-	g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
+	if (meta->disk[disk_pos].flags & INTEL_F_FAILED)
+		g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
+	else
+		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
 	TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
 		mvol = intel_get_volume(meta,
 		    (uintptr_t)(sd->sd_volume->v_md_data));
@@ -530,7 +541,10 @@ g_raid_md_intel_start_disk(struct g_raid
 		else
 			mmap1 = mmap0;
 
-		if (mvol->migr_state == 0) {
+		if (meta->disk[disk_pos].flags & INTEL_F_FAILED) {
+			g_raid_change_subdisk_state(sd,
+			    G_RAID_SUBDISK_S_FAILED);
+		} else if (mvol->migr_state == 0) {
 			if (mmap0->disk_idx[sd->sd_pos] & INTEL_DI_RBLD) {
 				g_raid_change_subdisk_state(sd,
 				    G_RAID_SUBDISK_S_NEW);
@@ -918,9 +932,9 @@ g_raid_md_event_intel(struct g_raid_md_o
 		/* Write updated metadata to all disks. */
 		g_raid_md_write_intel(md, NULL, NULL, NULL);
 
-		/* Check if anything left. */
-		if (g_raid_ndisks(sc, G_RAID_DISK_S_NONE) == 0 &&
-		    g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) == 0)
+		/* Check if anything left except placeholders. */
+		if (g_raid_ndisks(sc, -1) ==
+		    g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
 			g_raid_destroy_node(sc, 0);
 		break;
 	}
@@ -934,7 +948,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 	struct g_raid_softc *sc;
 	struct g_raid_volume *vol;
 	struct g_raid_subdisk *sd;
-	struct g_raid_disk *disk;
+	struct g_raid_disk *disk, *disk1;
 	struct g_raid_md_intel_object *mdi;
 	struct g_raid_md_intel_perdisk *pd;
 	struct g_consumer *cp;
@@ -944,7 +958,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 	int *nargs;
 	uint64_t size, sectorsize, strip;
 	intmax_t *sizearg, *striparg;
-	int numdisks, i, len, level, qual;
+	int numdisks, i, len, level, qual, disk_pos;
 	int error;
 
 	sc = md->mdo_softc;
@@ -1121,7 +1135,8 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 		g_raid_md_write_intel(md, NULL, NULL, NULL);
 		return (0);
 	}
-	if (strcmp(verb, "remove") == 0) {
+	if (strcmp(verb, "remove") == 0 ||
+	    strcmp(verb, "fail") == 0) {
 		if (*nargs < 2) {
 			gctl_error(req, "Invalid number of arguments.");
 			return (-1);
@@ -1150,6 +1165,12 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 				error = -3;
 				break;
 			}
+
+			if (strcmp(verb, "fail") == 0) {
+				g_raid_md_fail_disk_intel(md, NULL, disk);
+				continue;
+			}
+
 			pd = (struct g_raid_md_intel_perdisk *)disk->d_md_data;
 
 			/* Erase metadata on deleting disk. */
@@ -1180,9 +1201,9 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 		/* Write updated metadata to remaining disks. */
 		g_raid_md_write_intel(md, NULL, NULL, NULL);
 
-		/* Check if anything left. */
-		if (g_raid_ndisks(sc, G_RAID_DISK_S_NONE) == 0 &&
-		    g_raid_ndisks(sc, G_RAID_DISK_S_ACTIVE) == 0)
+		/* Check if anything left except placeholders. */
+		if (g_raid_ndisks(sc, -1) ==
+		    g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
 			g_raid_destroy_node(sc, 0);
 		return (error);
 	}
@@ -1198,7 +1219,8 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 				    disk->d_md_data;
 				if (pd->pd_disk_pos < 0)
 					continue;
-				if (disk->d_state == G_RAID_DISK_S_OFFLINE)
+				if (disk->d_state == G_RAID_DISK_S_OFFLINE ||
+				    disk->d_state == G_RAID_DISK_S_FAILED)
 					break;
 			}
 			if (disk == NULL) {
@@ -1206,6 +1228,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 				error = -2;
 				break;
 			}
+			disk_pos = pd->pd_disk_pos;
 
 			/* Get disk name. */
 			snprintf(arg, sizeof(arg), "arg%d", i);
@@ -1263,6 +1286,21 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 				}
 			}
 
+			/* If there is failed disk in slot - put it aside. */
+			if (disk->d_state == G_RAID_DISK_S_FAILED) {
+				disk1 = g_raid_create_disk(sc);
+				disk->d_consumer->private = disk1;
+				disk1->d_consumer = disk->d_consumer;
+				disk1->d_md_data = (void *)pd;
+				pd->pd_disk_pos = -2;
+				g_raid_change_disk_state(disk,
+				    G_RAID_DISK_S_STALE_FAILED);
+
+				pd = malloc(sizeof(*pd), M_MD_INTEL, M_WAITOK | M_ZERO);
+				pd->pd_disk_pos = disk_pos;
+				disk->d_md_data = (void *)pd;
+			}
+
 			/* Read disk metadata. */
 			error = g_raid_md_get_label(cp,
 			    &pd->pd_disk_meta.serial[0], INTEL_SERIAL_LEN);
@@ -1280,10 +1318,6 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 			cp->private = disk;
 			disk->d_consumer = cp;
 			pd->pd_disk_meta.sectors = pp->mediasize / pp->sectorsize;
-			if (size > pp->mediasize)
-				size = pp->mediasize;
-			if (sectorsize < pp->sectorsize)
-				sectorsize = pp->sectorsize;
 			pd->pd_disk_meta.id = 0;
 			pd->pd_disk_meta.flags = INTEL_F_ASSIGNED | INTEL_F_ONLINE;
 
@@ -1324,7 +1358,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 	sc = md->mdo_softc;
 	mdi = (struct g_raid_md_intel_object *)md;
 
-	/* Bump generation, as written metadata may differ from previous. */
+	/* Bump generation. Newly written metadata may differ from previous. */
 	mdi->mdio_generation++;
 
 	/* Count number of disks. */
@@ -1335,13 +1369,12 @@ g_raid_md_write_intel(struct g_raid_md_o
 			continue;
 		numdisks++;
 		if (disk->d_state == G_RAID_DISK_S_ACTIVE) {
-			pd->pd_disk_meta.flags |= INTEL_F_ASSIGNED;
-			pd->pd_disk_meta.flags |= INTEL_F_ONLINE;
+			pd->pd_disk_meta.flags =
+			    INTEL_F_ASSIGNED | INTEL_F_ONLINE;
 		} else if (disk->d_state == G_RAID_DISK_S_FAILED) {
-			pd->pd_disk_meta.flags &= ~INTEL_F_ASSIGNED;
-			pd->pd_disk_meta.flags |= INTEL_F_DOWN;
+			pd->pd_disk_meta.flags = INTEL_F_FAILED | INTEL_F_ASSIGNED;
 		} else {
-			pd->pd_disk_meta.flags &= ~INTEL_F_ONLINE;
+			pd->pd_disk_meta.flags = INTEL_F_ASSIGNED;
 			if (pd->pd_disk_meta.id != 0xffffffff) {
 				pd->pd_disk_meta.id = 0xffffffff;
 				len = strlen(pd->pd_disk_meta.serial);
@@ -1490,7 +1523,8 @@ g_raid_md_write_intel(struct g_raid_md_o
 				if (mvol->migr_state)
 					mmap1->disk_idx[sdi] |= INTEL_DI_RBLD;
 			}
-			if (sd->sd_state == G_RAID_SUBDISK_S_NONE &&
+			if ((sd->sd_state == G_RAID_SUBDISK_S_NONE ||
+			     sd->sd_state == G_RAID_SUBDISK_S_FAILED) &&
 			    mmap0->failed_disk_num == 0xff) {
 				mmap0->failed_disk_num = sdi;
 				if (mvol->migr_state)
@@ -1524,6 +1558,52 @@ g_raid_md_write_intel(struct g_raid_md_o
 }
 
 static int
+g_raid_md_fail_disk_intel(struct g_raid_md_object *md,
+    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
+{
+	struct g_raid_softc *sc;
+	struct g_raid_md_intel_object *mdi;
+	struct g_raid_md_intel_perdisk *pd;
+	struct g_raid_subdisk *sd;
+
+	sc = md->mdo_softc;
+	mdi = (struct g_raid_md_intel_object *)md;
+	pd = (struct g_raid_md_intel_perdisk *)tdisk->d_md_data;
+
+	/* We can't fail disk that is not a part of array now. */
+	if (pd->pd_disk_pos < 0)
+		return (-1);
+
+	/*
+	 * Mark disk as failed in metadata and try to write that metadata
+	 * to the disk itself to prevent it's later resurrection as STALE.
+	 */
+	mdi->mdio_meta->disk[pd->pd_disk_pos].flags = INTEL_F_FAILED;
+	pd->pd_disk_meta.flags = INTEL_F_FAILED;
+	g_raid_md_intel_print(mdi->mdio_meta);
+	if (tdisk->d_consumer)
+		intel_meta_write(tdisk->d_consumer, mdi->mdio_meta);
+
+	/* Change states. */
+	g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
+	TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
+		g_raid_change_subdisk_state(sd,
+		    G_RAID_SUBDISK_S_FAILED);
+		g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
+		    G_RAID_EVENT_SUBDISK);
+	}
+
+	/* Write updated metadata to remaining disks. */
+	g_raid_md_write_intel(md, NULL, NULL, tdisk);
+
+	/* Check if anything left except placeholders. */
+	if (g_raid_ndisks(sc, -1) ==
+	    g_raid_ndisks(sc, G_RAID_DISK_S_OFFLINE))
+		g_raid_destroy_node(sc, 0);
+	return (0);
+}
+
+static int
 g_raid_md_free_disk_intel(struct g_raid_md_object *md,
     struct g_raid_disk *disk)
 {

Modified: projects/graid/head/sys/geom/raid/tr_raid0.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid0.c	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/tr_raid0.c	Mon Jan 17 18:58:28 2011	(r217507)
@@ -93,7 +93,7 @@ g_raid_tr_update_state_raid0(struct g_ra
 	struct g_raid_tr_raid0_object *trs;
 	struct g_raid_softc *sc;
 	u_int s;
-	int n;
+	int n, f;
 
 	sc = vol->v_softc;
 	trs = (struct g_raid_tr_raid0_object *)vol->v_tr;
@@ -101,8 +101,12 @@ g_raid_tr_update_state_raid0(struct g_ra
 		s = G_RAID_VOLUME_S_STOPPED;
 	else {
 		n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
-		if (n == vol->v_disks_count) {
-			s = G_RAID_VOLUME_S_OPTIMAL;
+		f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
+		if (n + f == vol->v_disks_count) {
+			if (f == 0)
+				s = G_RAID_VOLUME_S_OPTIMAL;
+			else
+				s = G_RAID_VOLUME_S_SUBOPTIMAL;
 			trs->trso_starting = 0;
 		} else if (trs->trso_starting)
 			s = G_RAID_VOLUME_S_STARTING;
@@ -134,10 +138,13 @@ g_raid_tr_event_raid0(struct g_raid_tr_o
 	sc = vol->v_softc;
 	if (event == G_RAID_SUBDISK_E_NEW) {
 		state = sd->sd_state;
-		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
+		if (state != G_RAID_SUBDISK_S_FAILED)
+			g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
 		if (state != sd->sd_state &&
 		    !trs->trso_starting && !trs->trso_stopped)
 			g_raid_write_metadata(sc, vol, sd, NULL);
+	} else if (event == G_RAID_SUBDISK_E_FAILED) {
+//		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_FAILED);
 	} else
 		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
 	g_raid_tr_update_state_raid0(vol);

Modified: projects/graid/head/sys/geom/raid/tr_raid1.c
==============================================================================
--- projects/graid/head/sys/geom/raid/tr_raid1.c	Mon Jan 17 17:30:35 2011	(r217506)
+++ projects/graid/head/sys/geom/raid/tr_raid1.c	Mon Jan 17 18:58:28 2011	(r217507)
@@ -133,6 +133,8 @@ g_raid_tr_event_raid1(struct g_raid_tr_o
 	vol = tr->tro_volume;
 	if (event == G_RAID_SUBDISK_E_NEW) {
 //		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
+	} else if (event == G_RAID_SUBDISK_E_FAILED) {
+//		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_FAILED);
 	} else
 		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
 	g_raid_tr_update_state_raid1(vol);


