Date:      Sat, 13 Dec 2008 23:49:09 +0000 (UTC)
From:      Ulf Lilleengen <lulf@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r186068 - projects/gvinum/sys/geom/vinum
Message-ID:  <200812132349.mBDNn9mL043501@svn.freebsd.org>

Author: lulf
Date: Sat Dec 13 23:49:09 2008
New Revision: 186068
URL: http://svn.freebsd.org/changeset/base/186068

Log:
  - Add more assertions where appropriate.
  - Make minor fixes to error handling.
  - Fix a bug where calling start on a volume with a subdisk in the down
    state would cause a panic.
  - Allow RAID5 volumes to skip initialization.

Modified:
  projects/gvinum/sys/geom/vinum/geom_vinum.c
  projects/gvinum/sys/geom/vinum/geom_vinum_init.c
  projects/gvinum/sys/geom/vinum/geom_vinum_plex.c
  projects/gvinum/sys/geom/vinum/geom_vinum_raid5.c
  projects/gvinum/sys/geom/vinum/geom_vinum_state.c
  projects/gvinum/sys/geom/vinum/geom_vinum_subr.c
  projects/gvinum/sys/geom/vinum/geom_vinum_var.h

Modified: projects/gvinum/sys/geom/vinum/geom_vinum.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -111,6 +111,8 @@ gv_done(struct bio *bp)
 	struct g_geom *gp;
 	struct gv_softc *sc;
 	
+	KASSERT(bp != NULL, ("NULL bp"));
+
 	gp = bp->bio_from->geom;
 	sc = gp->softc;
 	bp->bio_cflags |= GV_BIO_DONE;
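
For readers following along without the kernel tree: KASSERT(9), used here
and again in the gv_sync_complete() and gv_grow_complete() hunks below,
compiles to a real check only under "options INVARIANTS" and panics with
the supplied printf-style message when the expression is false.  A rough
userspace approximation of the macro (a sketch, with NDEBUG standing in
for a kernel built without INVARIANTS):

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Userspace stand-in for the kernel's KASSERT(9).  The doubled
     * parentheses at the call site mirror the real macro: the second
     * parameter is a complete printf-style argument list.
     */
    #ifdef NDEBUG
    #define KASSERT(exp, msg)   do { } while (0)
    #else
    #define KASSERT(exp, msg)   do {        \
        if (!(exp)) {                       \
            printf msg;                     \
            printf("\n");                   \
            abort();                        \
        }                                   \
    } while (0)
    #endif

    int
    main(void)
    {
        void *bp = NULL;

        /* Aborts here, just as gv_done() would panic on a NULL bp. */
        KASSERT(bp != NULL, ("NULL bp"));
        return (0);
    }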

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_init.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_init.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_init.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -104,13 +104,8 @@ gv_start_plex(struct gv_plex *p)
 
 	KASSERT(p != NULL, ("gv_start_plex: NULL p"));
 
-/*	if (p->state == GV_PLEX_UP)
-		return (0);*/
-
 	error = 0;
 	v = p->vol_sc;
-/*	if ((v != NULL) && (v->plexcount > 1))
-		error = gv_sync(v);*/
 	if (p->org == GV_PLEX_STRIPED) {
 		grow = 0;
 		LIST_FOREACH(s, &p->subdisks, in_plex) {

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_plex.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_plex.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_plex.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -210,11 +210,12 @@ gv_plex_normal_request(struct gv_plex *p
 	off_t real_len, real_off;
 	int i, err, sdno;
 
-	err = ENXIO;
 	s = NULL;
 	sdno = -1;
 	real_len = real_off = 0;
 
+	err = ENXIO;
+
 	if (p == NULL || LIST_EMPTY(&p->subdisks)) 
 		goto bad;
 
@@ -225,8 +226,11 @@ gv_plex_normal_request(struct gv_plex *p
 		bioq_disksort(p->rqueue, bp);
 		return (-1); /* "Fail", and delay request. */
 	}
-	if (err)
+	if (err) {
+		err = ENXIO;
 		goto bad;
+	}
+	err = ENXIO;
 
 	/* Find the right subdisk. */
 	i = 0;
@@ -245,9 +249,17 @@ gv_plex_normal_request(struct gv_plex *p
 	case GV_SD_UP:
 		/* If the subdisk is up, just continue. */
 		break;
+	case GV_SD_DOWN:
+		if (bp->bio_cflags & GV_BIO_INTERNAL)
+			G_VINUM_DEBUG(0, "subdisk must be in the stale state in"
+			    " order to perform administrative requests");
+		goto bad;
 	case GV_SD_STALE:
-		if (!(bp->bio_cflags & GV_BIO_SYNCREQ))
+		if (!(bp->bio_cflags & GV_BIO_SYNCREQ)) {
+			G_VINUM_DEBUG(0, "subdisk stale, unable to perform "
+			    "regular requests");
 			goto bad;
+		}
 
 		G_VINUM_DEBUG(1, "sd %s is initializing", s->name);
 		gv_set_sd_state(s, GV_SD_INITIALIZING, GV_SETSTATE_FORCE);
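
Together with the next hunk (which drops the now-unused EINVAL
assignment), this switch ties in with the down-subdisk panic noted in the
log: a down subdisk now fails the request cleanly, and a stale subdisk
admits only synchronization requests.  A compact sketch of the access
decision (types are stand-ins; the real code also moves a stale subdisk
to the initializing state before continuing):

    #include <stdio.h>

    enum sd_state { SD_UP, SD_DOWN, SD_STALE, SD_OTHER };

    /* May this request touch the subdisk?  1 = yes, 0 = fail cleanly. */
    static int
    sd_accessible(enum sd_state state, int syncreq)
    {
        switch (state) {
        case SD_UP:
            return (1);
        case SD_DOWN:
            return (0);         /* previously this path could panic */
        case SD_STALE:
            return (syncreq);   /* only sync requests pass */
        default:
            return (0);         /* all other states are inaccessible */
        }
    }

    int
    main(void)
    {
        printf("%d %d %d\n", sd_accessible(SD_UP, 0),
            sd_accessible(SD_DOWN, 0), sd_accessible(SD_STALE, 1));
        /* prints: 1 0 1 */
        return (0);
    }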
@@ -258,7 +270,6 @@ gv_plex_normal_request(struct gv_plex *p
 		break;
 	default:
 		/* All other subdisk states mean it's not accessible. */
-		err = EINVAL;
 		goto bad;
 	}
 
@@ -280,8 +291,18 @@ gv_plex_normal_request(struct gv_plex *p
 	bioq_insert_tail(p->bqueue, cbp); 
 	return (real_len);
 bad:
-	/* Building the sub-request failed. */
 	G_VINUM_LOGREQ(0, bp, "plex request failed.");
+	/* Building the sub-request failed. If internal BIO, do not deliver. */
+	if (bp->bio_cflags & GV_BIO_INTERNAL) {
+		if (bp->bio_cflags & GV_BIO_MALLOC)
+			g_free(bp->bio_data);
+		g_destroy_bio(bp);
+		/* Reset flags. */
+		p->flags &= ~GV_PLEX_SYNCING;
+		p->flags &= ~GV_PLEX_REBUILDING;
+		p->flags &= ~GV_PLEX_GROWING;
+		return (-1);
+	}
 	g_io_deliver(bp, err);
 	return (-1);
 }
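
The rewritten failure path separates administrative BIOs from user I/O:
an internal request is freed locally and the plex's in-progress operation
flags are cleared, while a user request is still completed with the
error.  A condensed userspace sketch of that decision (the structs, flag
values and deliver_error() are stand-ins for struct bio, the kernel flag
bits and g_io_deliver(), not the real API; GV_BIO_INTERNAL is collapsed
to a single bit here for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    #define GV_BIO_INTERNAL     0x01    /* illustrative bit values */
    #define GV_BIO_MALLOC       0x02

    #define GV_PLEX_SYNCING     0x01
    #define GV_PLEX_REBUILDING  0x02
    #define GV_PLEX_GROWING     0x04

    struct bio_sketch { int cflags; void *data; };
    struct plex_sketch { int flags; };

    /* Stand-in for g_io_deliver(): complete the BIO with an error. */
    static void
    deliver_error(struct bio_sketch *bp, int err)
    {
        printf("delivered to consumer, error %d\n", err);
        free(bp);
    }

    /* Mirrors the "bad:" label: internal BIOs never reach the consumer. */
    static int
    fail_request(struct plex_sketch *p, struct bio_sketch *bp, int err)
    {
        if (bp->cflags & GV_BIO_INTERNAL) {
            if (bp->cflags & GV_BIO_MALLOC)
                free(bp->data);
            free(bp);
            p->flags &= ~(GV_PLEX_SYNCING | GV_PLEX_REBUILDING |
                GV_PLEX_GROWING);
            return (-1);
        }
        deliver_error(bp, err);
        return (-1);
    }

    int
    main(void)
    {
        struct plex_sketch p = { GV_PLEX_SYNCING };
        struct bio_sketch *bp = calloc(1, sizeof(*bp));

        if (bp == NULL)
            return (1);
        bp->cflags = GV_BIO_INTERNAL;
        fail_request(&p, bp, 6);    /* 6 == ENXIO on FreeBSD */
        printf("plex flags after: 0x%x\n", (unsigned)p.flags);  /* 0x0 */
        return (0);
    }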
@@ -427,6 +448,8 @@ gv_plex_raid5_done(struct gv_plex *p, st
 			gv_rebuild_complete(p, pbp);
 		} else if (pbp->bio_cflags & GV_BIO_INIT) {
 			gv_init_complete(p, pbp);
+		} else if (pbp->bio_cflags & GV_BIO_SYNCREQ) {
+			gv_sync_complete(p, pbp);
 		} else if (pbp->bio_pflags & GV_BIO_SYNCREQ) {
 			gv_grow_complete(p, pbp);
 		} else {
@@ -545,6 +568,9 @@ gv_sync_request(struct gv_plex *from, st
 {
 	struct bio *bp;
 
+	KASSERT(from != NULL, ("NULL from"));
+	KASSERT(to != NULL, ("NULL to"));
+
 	bp = g_new_bio();
 	if (bp == NULL) {
 		G_VINUM_DEBUG(0, "sync from '%s' failed at offset "
@@ -583,9 +609,14 @@ gv_sync_complete(struct gv_plex *to, str
 	g_topology_assert_not();
 
 	err = 0;
+	KASSERT(to != NULL, ("NULL to"));
+	KASSERT(bp != NULL, ("NULL bp"));
 	from = bp->bio_caller2;
+	KASSERT(from != NULL, ("NULL from"));
 	v = to->vol_sc;
+	KASSERT(v != NULL, ("NULL v"));
 	sc = v->vinumconf;
+	KASSERT(sc != NULL, ("NULL sc"));
 
 	/* If it was a read, write it. */
 	if (bp->bio_cmd == BIO_READ) {
@@ -600,11 +631,11 @@ gv_sync_complete(struct gv_plex *to, str
 		if (bp->bio_offset + bp->bio_length >= from->size) {
 			G_VINUM_DEBUG(1, "syncing of %s from %s completed",
 			    to->name, from->name);
-			to->flags &= ~GV_PLEX_SYNCING;
-			to->synced = 0;
 			/* Update our state. */
 			LIST_FOREACH(s, &to->subdisks, in_plex)
 				gv_set_sd_state(s, GV_SD_UP, 0);
+			to->flags &= ~GV_PLEX_SYNCING;
+			to->synced = 0;
 		} else {
 			offset = bp->bio_offset + bp->bio_length;
 			err = gv_sync_request(from, to, offset,
@@ -701,11 +732,7 @@ gv_grow_complete(struct gv_plex *p, stru
 		/* Find the real size of the plex. */
 		sdcount = gv_sdcount(p, 1);
 		s = LIST_FIRST(&p->subdisks);
-		/* XXX: should not ever happen */
-		if (s == NULL) {
-			G_VINUM_DEBUG(0, "error growing plex without subdisks");
-			return;
-		}
+		KASSERT(s != NULL, ("NULL s"));
 		origsize = (s->size * (sdcount - 1));
 		if (bp->bio_offset + bp->bio_length >= origsize) {
 			G_VINUM_DEBUG(1, "growing of %s completed", p->name);

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_raid5.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_raid5.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_raid5.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -61,6 +61,8 @@ gv_raid5_start(struct gv_plex *p, struct
 	delay = 0;
 	wp = g_malloc(sizeof(*wp), M_WAITOK | M_ZERO);
 	wp->bio = bp;
+	wp->waiting = NULL;
+	wp->parity = NULL;
 	TAILQ_INIT(&wp->bits);
 
 	if (bp->bio_cflags & GV_BIO_REBUILD)
@@ -80,7 +82,7 @@ gv_raid5_start(struct gv_plex *p, struct
 	 * Building the sub-request failed, we probably need to clean up a lot.
 	 */
 	if (err) {
-		G_VINUM_LOGREQ(0, bp, "plex request failed.");
+		G_VINUM_LOGREQ(0, bp, "raid5 plex request failed.");
 		TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
 			TAILQ_REMOVE(&wp->bits, bq, queue);
 			g_free(bq);
@@ -117,6 +119,17 @@ gv_raid5_start(struct gv_plex *p, struct
 			cbp = bioq_takefirst(p->bqueue);
 		}
 
+		/* If internal, stop and reset state. */
+		if (bp->bio_cflags & GV_BIO_INTERNAL) {
+			if (bp->bio_cflags & GV_BIO_MALLOC)
+				g_free(bp->bio_data);
+			g_destroy_bio(bp);
+			/* Reset flags. */
+			p->flags &= ~GV_PLEX_SYNCING;
+			p->flags &= ~GV_PLEX_REBUILDING;
+			p->flags &= ~GV_PLEX_GROWING;
+			return (NULL);
+		}
 		g_io_deliver(bp, err);
 		return (NULL);
 	}
@@ -385,8 +398,13 @@ gv_raid5_request(struct gv_plex *p, stru
 	/* Our data stripe is missing. */
 	if (original->state != GV_SD_UP)
 		type = REQ_TYPE_DEGRADED;
+
+	/* If synchronizing request, just write it if disks are stale. */
+	if (original->state == GV_SD_STALE && parity->state == GV_SD_STALE &&
+	    bp->bio_cflags & GV_BIO_SYNCREQ && bp->bio_cmd == BIO_WRITE) {
+		type = REQ_TYPE_NORMAL;
 	/* Our parity stripe is missing. */
-	if (parity->state != GV_SD_UP) {
+	} else if (parity->state != GV_SD_UP) {
 		/* We cannot take another failure if we're already degraded. */
 		if (type != REQ_TYPE_NORMAL)
 			return (ENXIO);
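
This is the heart of letting a RAID5 plex skip initialization: a
synchronization write aimed at stale data and parity stripes is now
classified as a normal request, so the data simply lands on disk instead
of being rejected.  A stand-alone sketch of the decision order (the enums
are stand-ins, and REQ_NOPARITY is an invented placeholder for whatever
type the driver assigns in the parity-missing case, which this hunk does
not show):

    #include <stdio.h>

    enum sd_state { SD_UP, SD_DOWN, SD_STALE };
    enum req_type { REQ_NORMAL, REQ_DEGRADED, REQ_NOPARITY };

    /*
     * Mirrors the order of checks in gv_raid5_request(): missing data
     * degrades the request; a sync write to two stale stripes stays
     * normal; losing parity on top of an already degraded request is a
     * double failure (ENXIO in the driver).
     */
    static int
    classify(enum sd_state data, enum sd_state parity, int syncwrite,
        enum req_type *type)
    {
        *type = REQ_NORMAL;
        if (data != SD_UP)
            *type = REQ_DEGRADED;
        if (data == SD_STALE && parity == SD_STALE && syncwrite) {
            *type = REQ_NORMAL;
        } else if (parity != SD_UP) {
            if (*type != REQ_NORMAL)
                return (-1);
            *type = REQ_NOPARITY;
        }
        return (0);
    }

    int
    main(void)
    {
        enum req_type t;

        printf("%d\n", classify(SD_STALE, SD_STALE, 1, &t)); /* 0: written */
        printf("%d\n", classify(SD_DOWN, SD_DOWN, 0, &t));   /* -1: ENXIO */
        return (0);
    }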

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_state.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_state.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_state.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -243,8 +243,11 @@ gv_set_sd_state(struct gv_sd *s, int new
 			if (p == NULL || flags & GV_SETSTATE_FORCE)
 				break;
 
-			if ((p->org != GV_PLEX_RAID5) &&
-			    (p->vol_sc->plexcount == 1))
+			if ((p->org != GV_PLEX_RAID5 &&
+			    p->vol_sc->plexcount == 1) ||
+			    (p->flags & GV_PLEX_SYNCING &&
+			    p->synced > 0 &&
+			    p->org == GV_PLEX_RAID5))
 				break;
 			else
 				return (GV_ERR_SETSTATE);
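
Read together with the reordering in gv_sync_complete() above (subdisks
are now marked up before GV_PLEX_SYNCING is cleared), this condition lets
a stale subdisk of a RAID5 plex come up once the plex is syncing and has
made progress.  A boolean sketch of the new predicate (field names mirror
the diff; this is not the full state machine):

    #include <stdio.h>

    /*
     * May a subdisk leave the stale state without GV_SETSTATE_FORCE?
     * Yes for the sole plex of a non-RAID5 volume, or for a RAID5 plex
     * that is syncing and has already completed part of it.
     */
    static int
    may_come_up(int is_raid5, int plexcount, int syncing, long synced)
    {
        return ((!is_raid5 && plexcount == 1) ||
            (is_raid5 && syncing && synced > 0));
    }

    int
    main(void)
    {
        printf("%d\n", may_come_up(0, 1, 0, 0));     /* 1: sole plex */
        printf("%d\n", may_come_up(1, 1, 1, 4096));  /* 1: syncing RAID5 */
        printf("%d\n", may_come_up(1, 1, 0, 0));     /* 0: GV_ERR_SETSTATE */
        return (0);
    }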

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_subr.c
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_subr.c	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_subr.c	Sat Dec 13 23:49:09 2008	(r186068)
@@ -486,9 +486,6 @@ gv_update_plex_config(struct gv_plex *p)
 
 	KASSERT(p != NULL, ("gv_update_plex_config: NULL p"));
 
-	/* This is what we want the plex to be. */
-	state = GV_PLEX_UP;
-
 	/* The plex was added to an already running volume. */
 	if (p->flags & GV_PLEX_ADDED)
 		gv_set_plex_state(p, GV_PLEX_DOWN, GV_SETSTATE_FORCE);
@@ -540,16 +537,23 @@ gv_update_plex_config(struct gv_plex *p)
 	p->size = gv_plex_size(p);
 	if (p->sdcount == 0)
 		gv_set_plex_state(p, GV_PLEX_DOWN, GV_SETSTATE_FORCE);
-	else if ((p->flags & GV_PLEX_ADDED) ||
-	    ((p->org == GV_PLEX_RAID5) && (p->flags & GV_PLEX_NEWBORN))) {
+	else if (p->org == GV_PLEX_RAID5 && p->flags & GV_PLEX_NEWBORN) {
 		LIST_FOREACH(s, &p->subdisks, in_plex)
-			gv_set_sd_state(s, GV_SD_STALE, GV_SETSTATE_FORCE);
+			gv_set_sd_state(s, GV_SD_UP, GV_SETSTATE_FORCE);
+		/* If added to a volume, we want the plex to be down. */
+		state = (p->flags & GV_PLEX_ADDED) ? GV_PLEX_DOWN : GV_PLEX_UP;
+		gv_set_plex_state(p, state, GV_SETSTATE_FORCE);
 		p->flags &= ~GV_PLEX_ADDED;
+	} else if (p->flags & GV_PLEX_ADDED) {
+		LIST_FOREACH(s, &p->subdisks, in_plex)
+			gv_set_sd_state(s, GV_SD_STALE, GV_SETSTATE_FORCE);
 		gv_set_plex_state(p, GV_PLEX_DOWN, GV_SETSTATE_FORCE);
+		p->flags &= ~GV_PLEX_ADDED;
 	} else if (p->state == GV_PLEX_UP) {
 		LIST_FOREACH(s, &p->subdisks, in_plex) {
 			if (s->flags & GV_SD_GROW) {
-				p->state = GV_PLEX_GROWABLE;
+				gv_set_plex_state(p, GV_PLEX_GROWABLE,
+				    GV_SETSTATE_FORCE);
 				break;
 			}
 		}
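
The net effect of the rewritten branches: a freshly created (newborn)
RAID5 plex now starts with its subdisks up, skipping the initialization
pass, while a plex added to an existing volume still comes up down with
stale subdisks so that it gets synced first.  A small sketch of that
policy (the enums and function are stand-ins covering only these two
branches, not all of gv_update_plex_config()):

    #include <stdio.h>

    enum plex_state { PLEX_UP, PLEX_DOWN };
    enum sd_state { SD_UP, SD_STALE };

    /*
     * Returns 1 when one of the two rewritten branches applies and
     * fills in the resulting states; 0 means another branch of
     * gv_update_plex_config() would run instead.
     */
    static int
    newplex_policy(int is_raid5, int newborn, int added,
        enum plex_state *ps, enum sd_state *ss)
    {
        if (is_raid5 && newborn) {
            *ss = SD_UP;        /* no initialization pass needed */
            *ps = added ? PLEX_DOWN : PLEX_UP;
            return (1);
        }
        if (added) {
            *ss = SD_STALE;     /* must be synced before use */
            *ps = PLEX_DOWN;
            return (1);
        }
        return (0);
    }

    int
    main(void)
    {
        enum plex_state ps;
        enum sd_state ss;

        if (newplex_policy(1, 1, 0, &ps, &ss))
            printf("newborn raid5: plex %s, subdisks %s\n",
                ps == PLEX_UP ? "up" : "down",
                ss == SD_UP ? "up" : "stale");
        return (0);
    }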
@@ -1124,9 +1128,13 @@ int
 gv_attach_plex(struct gv_plex *p, struct gv_volume *v, int rename)
 {
 	struct gv_sd *s;
+	struct gv_softc *sc;
 
 	g_topology_assert();
 
+	sc = p->vinumconf;
+	KASSERT(sc != NULL, ("NULL sc"));
+
 	if (p->vol_sc != NULL) {
 		G_VINUM_DEBUG(1, "unable to attach %s: already attached to %s",
 		    p->name, p->volume);

Modified: projects/gvinum/sys/geom/vinum/geom_vinum_var.h
==============================================================================
--- projects/gvinum/sys/geom/vinum/geom_vinum_var.h	Sat Dec 13 23:36:34 2008	(r186067)
+++ projects/gvinum/sys/geom/vinum/geom_vinum_var.h	Sat Dec 13 23:49:09 2008	(r186068)
@@ -117,6 +117,8 @@
 #define	GV_BIO_CHECK	0x40
 #define	GV_BIO_PARITY	0x80
 #define	GV_BIO_RETRY	0x100
+#define	GV_BIO_INTERNAL \
+    (GV_BIO_SYNCREQ | GV_BIO_INIT | GV_BIO_REBUILD | GV_BIO_CHECK)
 
 /* Error codes to be used within gvinum. */
 #define	GV_ERR_SETSTATE		(-1)	/* Error setting state. */
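
GV_BIO_INTERNAL is just the union of the administrative request bits, so
a single mask test covers sync, init, rebuild and check requests at once.
A tiny verification sketch (the diff defines GV_BIO_CHECK as 0x40, but
the SYNCREQ/INIT/REBUILD values are not shown here, so those bits are
assumptions):

    #include <stdio.h>

    /* CHECK matches the diff; the other bit values are assumed. */
    #define GV_BIO_SYNCREQ  0x01
    #define GV_BIO_INIT     0x02
    #define GV_BIO_REBUILD  0x20
    #define GV_BIO_CHECK    0x40
    #define GV_BIO_INTERNAL \
        (GV_BIO_SYNCREQ | GV_BIO_INIT | GV_BIO_REBUILD | GV_BIO_CHECK)

    int
    main(void)
    {
        int cflags = GV_BIO_REBUILD;

        /* One mask test replaces four separate flag checks. */
        printf("internal: %s\n",
            (cflags & GV_BIO_INTERNAL) ? "yes" : "no");  /* yes */
        return (0);
    }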


