Date:      Sun, 13 Feb 2011 13:21:55 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r218638 - in projects/graid/head/sys: conf geom/raid modules/geom/geom_raid
Message-ID:  <201102131321.p1DDLtx1039938@svn.freebsd.org>

Author: mav
Date: Sun Feb 13 13:21:55 2011
New Revision: 218638
URL: http://svn.freebsd.org/changeset/base/218638

Log:
  Add a transformation module handling different combinations of striping
  and adjacent mirroring, in addition to the specialized and more efficient
  RAID0 and RAID1 modules.
  
  With two data copies and 4/6/... disks it becomes RAID10; with 3/5/...
  disks it becomes RAID1E; with only 2 disks it duplicates plain RAID1.
  With one data copy it is a needlessly complicated RAID0; with 3+ copies
  it is an over-redundant RAID1/1E/10.
  
  At this moment the module can read, write and dump volumes with 2 data
  copies on 2+ disks.  Rebuild, resync and recovery are still missing.
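  
  For illustration, with two data copies on three disks the adjacent
  mirroring places the copies a/b of each virtual strip Vk as follows,
  which is exactly the arithmetic the new V2P()/P2V() helpers implement:
  
              disk0   disk1   disk2
        row0  V0a     V0b     V1a
        row1  V1b     V2a     V2b
  
  A minimal standalone sketch of the same virtual-to-physical mapping
  (illustrative only; these names are not part of the module):
  
  	#include <assert.h>
  
  	#define N	2	/* data copies */
  
  	static void
  	v2p(long long virt, long long strip, int ndisks,
  	    int *disk, long long *offset, long long *start)
  	{
  		long long nstrip = virt / strip;	/* virtual strip number */
  
  		*start = virt % strip;		/* position inside the strip */
  		*disk = (nstrip * N) % ndisks;	/* disk of the first copy */
  		*offset = ((nstrip * N) / ndisks) * strip; /* strip start on disk */
  	}
  
  	int
  	main(void)
  	{
  		int disk;
  		long long off, start;
  
  		/* Virtual strip 2 on 3 disks lands on disk 1, row 1. */
  		v2p(2 * 65536LL, 65536, 3, &disk, &off, &start);
  		assert(disk == 1 && off == 65536 && start == 0);
  		return (0);
  	}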

Added:
  projects/graid/head/sys/geom/raid/tr_raid1e.c   (contents, props changed)
Modified:
  projects/graid/head/sys/conf/files
  projects/graid/head/sys/geom/raid/md_intel.c
  projects/graid/head/sys/modules/geom/geom_raid/Makefile

Modified: projects/graid/head/sys/conf/files
==============================================================================
--- projects/graid/head/sys/conf/files	Sun Feb 13 13:11:00 2011	(r218637)
+++ projects/graid/head/sys/conf/files	Sun Feb 13 13:21:55 2011	(r218638)
@@ -2092,6 +2092,7 @@ geom/raid/g_raid_tr_if.m	optional geom_r
 geom/raid/md_intel.c		optional geom_raid
 geom/raid/tr_raid0.c		optional geom_raid
 geom/raid/tr_raid1.c		optional geom_raid
+geom/raid/tr_raid1e.c		optional geom_raid
 geom/raid3/g_raid3.c		optional geom_raid3
 geom/raid3/g_raid3_ctl.c	optional geom_raid3
 geom/shsec/g_shsec.c		optional geom_shsec

Modified: projects/graid/head/sys/geom/raid/md_intel.c
==============================================================================
--- projects/graid/head/sys/geom/raid/md_intel.c	Sun Feb 13 13:11:00 2011	(r218637)
+++ projects/graid/head/sys/geom/raid/md_intel.c	Sun Feb 13 13:21:55 2011	(r218638)
@@ -586,7 +586,7 @@ g_raid_md_intel_supported(int level, int
 			return (0);
 		break;
 	case G_RAID_VOLUME_RL_RAID1E:
-		if (disks < 3)
+		if (disks < 2)
 			return (0);
 		if (!force && (disks != 4))
 			return (0);
@@ -1539,6 +1539,9 @@ makedisk:
 		/* Round size down to strip or sector. */
 		if (level == G_RAID_VOLUME_RL_RAID1)
 			size -= (size % sectorsize);
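+		/*
+		 * With two data copies per strip, the total physical
+		 * strip count numdisks * (size / strip) must be even
+		 * for the volume to end on a whole virtual strip.  For
+		 * an odd numdisks that means size must be a multiple
+		 * of two strips.
+		 */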
+		else if (level == G_RAID_VOLUME_RL_RAID1E &&
+		    (numdisks & 1) != 0)
+			size -= (size % (2 * strip));
 		else
 			size -= (size % strip);
 		if (size <= 0) {

Added: projects/graid/head/sys/geom/raid/tr_raid1e.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/graid/head/sys/geom/raid/tr_raid1e.c	Sun Feb 13 13:21:55 2011	(r218638)
@@ -0,0 +1,1159 @@
+/*-
+ * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kobj.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <geom/geom.h>
+#include "geom/raid/g_raid.h"
+#include "g_raid_tr_if.h"
+
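+/* Number of data copies per virtual strip. */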
+#define N	2
+
+SYSCTL_DECL(_kern_geom_raid);
+SYSCTL_NODE(_kern_geom_raid, OID_AUTO, raid1e, CTLFLAG_RW, 0,
+    "RAID1E parameters");
+
+#define RAID1E_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
+static int g_raid1e_rebuild_slab = RAID1E_REBUILD_SLAB;
+TUNABLE_INT("kern.geom.raid.raid1e.rebuild_slab_size",
+    &g_raid1e_rebuild_slab);
+SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_slab_size, CTLFLAG_RW,
+    &g_raid1e_rebuild_slab, 0,
+    "Amount of the disk to rebuild each read/write cycle of the rebuild.");
+
+#define RAID1E_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
+static int g_raid1e_rebuild_fair_io = RAID1E_REBUILD_FAIR_IO;
+TUNABLE_INT("kern.geom.raid.raid1e.rebuild_fair_io",
+    &g_raid1e_rebuild_fair_io);
+SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_fair_io, CTLFLAG_RW,
+    &g_raid1e_rebuild_fair_io, 0,
+    "Fraction of the I/O bandwidth to use when disk busy for rebuild.");
+
+#define RAID1E_REBUILD_CLUSTER_IDLE 100
+static int g_raid1e_rebuild_cluster_idle = RAID1E_REBUILD_CLUSTER_IDLE;
+TUNABLE_INT("kern.geom.raid.raid1e.rebuild_cluster_idle",
+    &g_raid1e_rebuild_cluster_idle);
+SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW,
+    &g_raid1e_rebuild_cluster_idle, 0,
+    "Number of slabs to do each time we trigger a rebuild cycle");
+
+#define RAID1E_REBUILD_META_UPDATE 1024 /* update metadata every 1GB or so */
+static int g_raid1e_rebuild_meta_update = RAID1E_REBUILD_META_UPDATE;
+TUNABLE_INT("kern.geom.raid.raid1e.rebuild_meta_update",
+    &g_raid1e_rebuild_meta_update);
+SYSCTL_UINT(_kern_geom_raid_raid1e, OID_AUTO, rebuild_meta_update, CTLFLAG_RW,
+    &g_raid1e_rebuild_meta_update, 0,
+    "When to update the meta data.");
+
+static MALLOC_DEFINE(M_TR_RAID1E, "tr_raid1e_data", "GEOM_RAID RAID1E data");
+
+#define TR_RAID1E_NONE 0
+#define TR_RAID1E_REBUILD 1
+#define TR_RAID1E_RESYNC 2
+
+#define TR_RAID1E_F_DOING_SOME	0x1
+#define TR_RAID1E_F_LOCKED	0x2
+#define TR_RAID1E_F_ABORT	0x4
+
+struct g_raid_tr_raid1e_object {
+	struct g_raid_tr_object	 trso_base;
+	int			 trso_starting;
+	int			 trso_stopping;
+	int			 trso_type;
+	int			 trso_recover_slabs; /* slabs before rest */
+	int			 trso_fair_io;
+	int			 trso_meta_update;
+	int			 trso_flags;
+	struct g_raid_subdisk	*trso_failed_sd; /* like per volume */
+	void			*trso_buffer;	 /* Buffer space */
+	struct bio		 trso_bio;
+};
+
+static g_raid_tr_taste_t g_raid_tr_taste_raid1e;
+static g_raid_tr_event_t g_raid_tr_event_raid1e;
+static g_raid_tr_start_t g_raid_tr_start_raid1e;
+static g_raid_tr_stop_t g_raid_tr_stop_raid1e;
+static g_raid_tr_iostart_t g_raid_tr_iostart_raid1e;
+static g_raid_tr_iodone_t g_raid_tr_iodone_raid1e;
+static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1e;
+static g_raid_tr_locked_t g_raid_tr_locked_raid1e;
+static g_raid_tr_idle_t g_raid_tr_idle_raid1e;
+static g_raid_tr_free_t g_raid_tr_free_raid1e;
+
+static kobj_method_t g_raid_tr_raid1e_methods[] = {
+	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1e),
+	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1e),
+	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1e),
+	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1e),
+	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1e),
+	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1e),
+	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1e),
+	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1e),
+	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1e),
+	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1e),
+	{ 0, 0 }
+};
+
+static struct g_raid_tr_class g_raid_tr_raid1e_class = {
+	"RAID1E",
+	g_raid_tr_raid1e_methods,
+	sizeof(struct g_raid_tr_raid1e_object),
+	.trc_priority = 200
+};
+
+static void g_raid_tr_raid1e_rebuild_abort(struct g_raid_tr_object *tr);
+static void g_raid_tr_raid1e_maybe_rebuild(struct g_raid_tr_object *tr,
+    struct g_raid_subdisk *sd);
+
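+/*
+ * Translate a virtual (volume) offset into the disk holding the first
+ * copy, the strip offset on that disk and the position inside the strip.
+ */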
+static inline void
+V2P(struct g_raid_volume *vol, off_t virt,
+    int *disk, off_t *offset, off_t *start)
+{
+	off_t nstrip;
+	u_int strip_size;
+
+	strip_size = vol->v_strip_size;
+	/* Strip number. */
+	nstrip = virt / strip_size;
+	/* Start position in strip. */
+	*start = virt % strip_size;
+	/* Disk number. */
+	*disk = (nstrip * N) % vol->v_disks_count;
+	/* Strip start position in disk. */
+	*offset = ((nstrip * N) / vol->v_disks_count) * strip_size;
+}
+
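+/*
+ * Translate a disk number and on-disk offset back into the virtual
+ * offset and the index of the copy this physical strip holds.
+ */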
+static inline void
+P2V(struct g_raid_volume *vol, int disk, off_t offset,
+    off_t *virt, int *copy)
+{
+	off_t nstrip, start;
+	u_int strip_size;
+
+	strip_size = vol->v_strip_size;
+	/* Start position in strip. */
+	start = offset % strip_size;
+	/* Physical strip number. */
+	nstrip = (offset / strip_size) * vol->v_disks_count + disk;
+	/* Number of physical strip (copy) inside virtual strip. */
+	*copy = nstrip % N;
+	/* Offset in virtual space. */
+	*virt = (nstrip / N) * strip_size + start;
+}
+
+static int
+g_raid_tr_taste_raid1e(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
+{
+	struct g_raid_tr_raid1e_object *trs;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1E ||
+	    tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_NONE)
+		return (G_RAID_TR_TASTE_FAIL);
+	trs->trso_starting = 1;
+	return (G_RAID_TR_TASTE_SUCCEED);
+}
+
+static int
+g_raid_tr_update_state_raid1e_even(struct g_raid_volume *vol)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_softc *sc;
+	struct g_raid_subdisk *sd, *bestsd, *worstsd;
+	int i, j, state, sstate;
+
+	sc = vol->v_softc;
+	trs = (struct g_raid_tr_raid1e_object *)vol->v_tr;
+	state = G_RAID_VOLUME_S_OPTIMAL;
+	for (i = 0; i < vol->v_disks_count / N; i++) {
+		bestsd = &vol->v_subdisks[i * N];
+		worstsd = &vol->v_subdisks[i * N];
+		for (j = 1; j < N; j++) {
+			sd = &vol->v_subdisks[i * N + j];
+			if (sd->sd_state > bestsd->sd_state)
+				bestsd = sd;
+			else if (sd->sd_state == bestsd->sd_state &&
+			    (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
+			     sd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
+			    sd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
+				bestsd = sd;
+			if (sd->sd_state < worstsd->sd_state)
+				worstsd = sd;
+		}
+		if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED &&
+		    bestsd->sd_state != G_RAID_SUBDISK_S_ACTIVE) {
+			/* We found reasonable candidate. */
+			G_RAID_DEBUG1(1, sc,
+			    "Promote subdisk %s:%d from %s to ACTIVE.",
+			    vol->v_name, bestsd->sd_pos,
+			    g_raid_subdisk_state2str(bestsd->sd_state));
+			g_raid_change_subdisk_state(bestsd,
+			    G_RAID_SUBDISK_S_ACTIVE);
+			g_raid_write_metadata(sc,
+			    vol, bestsd, bestsd->sd_disk);
+		}
+		if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE)
+			sstate = G_RAID_VOLUME_S_OPTIMAL;
+		else if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE)
+			sstate = G_RAID_VOLUME_S_SUBOPTIMAL;
+		else if (bestsd->sd_state == G_RAID_SUBDISK_S_ACTIVE)
+			sstate = G_RAID_VOLUME_S_DEGRADED;
+		else
+			sstate = G_RAID_VOLUME_S_BROKEN;
+		if (sstate < state)
+			state = sstate;
+	}
+	return (state);
+}
+
+static int
+g_raid_tr_update_state_raid1e_odd(struct g_raid_volume *vol)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_softc *sc;
+	struct g_raid_subdisk *sd, *bestsd, *worstsd;
+	int i, j, state, sstate;
+
+	sc = vol->v_softc;
+	trs = (struct g_raid_tr_raid1e_object *)vol->v_tr;
+	if (g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) ==
+	    vol->v_disks_count)
+		return (G_RAID_VOLUME_S_OPTIMAL);
+	for (i = 0; i < vol->v_disks_count; i++) {
+		sd = &vol->v_subdisks[i];
+		if (sd->sd_state == G_RAID_SUBDISK_S_UNINITIALIZED) {
+			/* We found reasonable candidate. */
+			G_RAID_DEBUG1(1, sc,
+			    "Promote subdisk %s:%d from %s to STALE.",
+			    vol->v_name, sd->sd_pos,
+			    g_raid_subdisk_state2str(sd->sd_state));
+			g_raid_change_subdisk_state(sd,
+			    G_RAID_SUBDISK_S_STALE);
+			g_raid_write_metadata(sc, vol, sd, sd->sd_disk);
+		}
+	}
+	state = G_RAID_VOLUME_S_OPTIMAL;
+	for (i = 0; i < vol->v_disks_count; i++) {
+		bestsd = &vol->v_subdisks[i];
+		worstsd = &vol->v_subdisks[i];
+		for (j = 1; j < N; j++) {
+			sd = &vol->v_subdisks[(i + j) % vol->v_disks_count];
+			if (sd->sd_state > bestsd->sd_state)
+				bestsd = sd;
+			else if (sd->sd_state == bestsd->sd_state &&
+			    (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
+			     sd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
+			    sd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
+				bestsd = sd;
+			if (sd->sd_state < worstsd->sd_state)
+				worstsd = sd;
+		}
+		if (worstsd->sd_state == G_RAID_SUBDISK_S_ACTIVE)
+			sstate = G_RAID_VOLUME_S_OPTIMAL;
+		else if (worstsd->sd_state >= G_RAID_SUBDISK_S_STALE)
+			sstate = G_RAID_VOLUME_S_SUBOPTIMAL;
+		else if (bestsd->sd_state >= G_RAID_SUBDISK_S_STALE)
+			sstate = G_RAID_VOLUME_S_DEGRADED;
+		else
+			sstate = G_RAID_VOLUME_S_BROKEN;
+		if (sstate < state)
+			state = sstate;
+	}
+	return (state);
+}
+
+static int
+g_raid_tr_update_state_raid1e(struct g_raid_volume *vol,
+    struct g_raid_subdisk *sd)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_softc *sc;
+	u_int s;
+
+	sc = vol->v_softc;
+	trs = (struct g_raid_tr_raid1e_object *)vol->v_tr;
+	if (trs->trso_stopping &&
+	    (trs->trso_flags & TR_RAID1E_F_DOING_SOME) == 0)
+		s = G_RAID_VOLUME_S_STOPPED;
+	else if (trs->trso_starting)
+		s = G_RAID_VOLUME_S_STARTING;
+	else {
+		if ((vol->v_disks_count % N) == 0)
+			s = g_raid_tr_update_state_raid1e_even(vol);
+		else
+			s = g_raid_tr_update_state_raid1e_odd(vol);
+		g_raid_tr_raid1e_maybe_rebuild(vol->v_tr, sd);
+	}
+	if (s != vol->v_state) {
+		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
+		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
+		    G_RAID_EVENT_VOLUME);
+		g_raid_change_volume_state(vol, s);
+		if (!trs->trso_starting && !trs->trso_stopping)
+			g_raid_write_metadata(sc, vol, NULL, NULL);
+	}
+	return (0);
+}
+
+static void
+g_raid_tr_raid1e_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
+    struct g_raid_disk *disk)
+{
+	/*
+	 * We don't fail the last disk in the pack, since it still has decent
+	 * data on it and that's better than failing the disk if it is the root
+	 * file system.
+	 *
+	 * XXX should this be controlled via a tunable?  It makes sense for
+	 * the volume that has / on it.  I can't think of a case where we'd
+	 * want the volume to go away on this kind of event.
+	 */
+	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
+	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
+		return;
+	g_raid_fail_disk(sc, sd, disk);
+}
+
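+/*
+ * Start one rebuild transaction: read a slab from a good subdisk.  The
+ * iodone path converts the completed read into a write to the failed
+ * subdisk and advances the rebuild position.
+ */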
+static void
+g_raid_tr_raid1e_rebuild_some(struct g_raid_tr_object *tr)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_subdisk *sd, *good_sd;
+	struct bio *bp;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	if (trs->trso_flags & TR_RAID1E_F_DOING_SOME)
+		return;
+	sd = trs->trso_failed_sd;
+	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
+	if (good_sd == NULL) {
+		g_raid_tr_raid1e_rebuild_abort(tr);
+		return;
+	}
+	bp = &trs->trso_bio;
+	memset(bp, 0, sizeof(*bp));
+	bp->bio_offset = sd->sd_rebuild_pos;
+	bp->bio_length = MIN(g_raid1e_rebuild_slab,
+	    sd->sd_volume->v_mediasize - sd->sd_rebuild_pos);
+	bp->bio_data = trs->trso_buffer;
+	bp->bio_cmd = BIO_READ;
+	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
+	bp->bio_caller1 = good_sd;
+	trs->trso_flags |= TR_RAID1E_F_DOING_SOME;
+	trs->trso_flags |= TR_RAID1E_F_LOCKED;
+	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
+	   bp->bio_offset, bp->bio_length, NULL, bp);
+}
+
+static void
+g_raid_tr_raid1e_rebuild_done(struct g_raid_tr_raid1e_object *trs)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_subdisk *sd;
+
+	vol = trs->trso_base.tro_volume;
+	sd = trs->trso_failed_sd;
+	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
+	free(trs->trso_buffer, M_TR_RAID1E);
+	trs->trso_buffer = NULL;
+	trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME;
+	trs->trso_type = TR_RAID1E_NONE;
+	trs->trso_recover_slabs = 0;
+	trs->trso_failed_sd = NULL;
+	g_raid_tr_update_state_raid1e(vol, NULL);
+}
+
+static void
+g_raid_tr_raid1e_rebuild_finish(struct g_raid_tr_object *tr)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_subdisk *sd;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	sd = trs->trso_failed_sd;
+	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
+	    "Subdisk %s:%d-%s rebuild completed.",
+	    sd->sd_volume->v_name, sd->sd_pos,
+	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
+	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
+	sd->sd_rebuild_pos = 0;
+	g_raid_tr_raid1e_rebuild_done(trs);
+}
+
+static void
+g_raid_tr_raid1e_rebuild_abort(struct g_raid_tr_object *tr)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_subdisk *sd;
+	struct g_raid_volume *vol;
+	off_t len;
+
+	vol = tr->tro_volume;
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	sd = trs->trso_failed_sd;
+	if (trs->trso_flags & TR_RAID1E_F_DOING_SOME) {
+		G_RAID_DEBUG1(1, vol->v_softc,
+		    "Subdisk %s:%d-%s rebuild is aborting.",
+		    sd->sd_volume->v_name, sd->sd_pos,
+		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
+		trs->trso_flags |= TR_RAID1E_F_ABORT;
+	} else {
+		G_RAID_DEBUG1(0, vol->v_softc,
+		    "Subdisk %s:%d-%s rebuild aborted.",
+		    sd->sd_volume->v_name, sd->sd_pos,
+		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
+		trs->trso_flags &= ~TR_RAID1E_F_ABORT;
+		if (trs->trso_flags & TR_RAID1E_F_LOCKED) {
+			trs->trso_flags &= ~TR_RAID1E_F_LOCKED;
+			len = MIN(g_raid1e_rebuild_slab,
+			    vol->v_mediasize - sd->sd_rebuild_pos);
+			g_raid_unlock_range(tr->tro_volume,
+			    sd->sd_rebuild_pos, len);
+		}
+		g_raid_tr_raid1e_rebuild_done(trs);
+	}
+}
+
+static void
+g_raid_tr_raid1e_rebuild_start(struct g_raid_tr_object *tr)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_subdisk *sd, *fsd;
+
+	vol = tr->tro_volume;
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	if (trs->trso_failed_sd) {
+		G_RAID_DEBUG1(1, vol->v_softc,
+		    "Rebuild already in progress, pos %jd.\n",
+		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
+		return;
+	}
+	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
+	if (sd == NULL) {
+		G_RAID_DEBUG1(1, vol->v_softc,
+		    "No active disk to rebuild.  night night.");
+		return;
+	}
+	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
+	if (fsd == NULL)
+		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
+	if (fsd == NULL) {
+		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
+		if (fsd != NULL) {
+			fsd->sd_rebuild_pos = 0;
+			g_raid_change_subdisk_state(fsd,
+			    G_RAID_SUBDISK_S_RESYNC);
+			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
+		} else {
+			fsd = g_raid_get_subdisk(vol,
+			    G_RAID_SUBDISK_S_UNINITIALIZED);
+			if (fsd == NULL)
+				fsd = g_raid_get_subdisk(vol,
+				    G_RAID_SUBDISK_S_NEW);
+			if (fsd != NULL) {
+				fsd->sd_rebuild_pos = 0;
+				g_raid_change_subdisk_state(fsd,
+				    G_RAID_SUBDISK_S_REBUILD);
+				g_raid_write_metadata(vol->v_softc,
+				    vol, fsd, NULL);
+			}
+		}
+	}
+	if (fsd == NULL) {
+		G_RAID_DEBUG1(1, vol->v_softc,
+		    "No failed disk to rebuild.  night night.");
+		return;
+	}
+	trs->trso_failed_sd = fsd;
+	G_RAID_DEBUG1(0, vol->v_softc,
+	    "Subdisk %s:%d-%s rebuild start at %jd.",
+	    fsd->sd_volume->v_name, fsd->sd_pos,
+	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
+	    trs->trso_failed_sd->sd_rebuild_pos);
+	trs->trso_type = TR_RAID1E_REBUILD;
+	trs->trso_buffer = malloc(g_raid1e_rebuild_slab, M_TR_RAID1E, M_WAITOK);
+	trs->trso_meta_update = g_raid1e_rebuild_meta_update;
+	g_raid_tr_raid1e_rebuild_some(tr);
+}
+
+static void
+g_raid_tr_raid1e_maybe_rebuild(struct g_raid_tr_object *tr,
+    struct g_raid_subdisk *sd)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_tr_raid1e_object *trs;
+	int na, nr;
+	
+	/*
+	 * If we're stopping, don't do anything.  If we don't have at least one
+	 * good disk and one bad disk, we don't do anything.  And if there's a
+	 * 'good disk' stored in the trs, then we're in progress and we punt.
+	 * If we make it past all these checks, we need to rebuild.
+	 */
+	vol = tr->tro_volume;
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	if (trs->trso_stopping)
+		return;
+	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
+	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
+	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
+	switch(trs->trso_type) {
+	case TR_RAID1E_NONE:
+		if (na == 0)
+			return;
+		if (nr == 0) {
+			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
+			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
+			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
+			if (nr == 0)
+				return;
+		}
+		g_raid_tr_raid1e_rebuild_start(tr);
+		break;
+	case TR_RAID1E_REBUILD:
+		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
+			g_raid_tr_raid1e_rebuild_abort(tr);
+		break;
+	case TR_RAID1E_RESYNC:
+		break;
+	}
+}
+
+static int
+g_raid_tr_event_raid1e(struct g_raid_tr_object *tr,
+    struct g_raid_subdisk *sd, u_int event)
+{
+
+	g_raid_tr_update_state_raid1e(tr->tro_volume, sd);
+	return (0);
+}
+
+static int
+g_raid_tr_start_raid1e(struct g_raid_tr_object *tr)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_volume *vol;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	vol = tr->tro_volume;
+	trs->trso_starting = 0;
+	g_raid_tr_update_state_raid1e(vol, NULL);
+	return (0);
+}
+
+static int
+g_raid_tr_stop_raid1e(struct g_raid_tr_object *tr)
+{
+	struct g_raid_tr_raid1e_object *trs;
+	struct g_raid_volume *vol;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	vol = tr->tro_volume;
+	trs->trso_starting = 0;
+	trs->trso_stopping = 1;
+	g_raid_tr_update_state_raid1e(vol, NULL);
+	return (0);
+}
+
+/*
+ * Select the disk to read from.  Take into account: subdisk state, running
+ * error recovery, average disk load, head position and possible cache hits.
+ */
+#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
+static int
+g_raid_tr_raid1e_select_read_disk(struct g_raid_volume *vol,
+    int no, off_t off, off_t len, u_int mask)
+{
+	struct g_raid_subdisk *sd;
+	off_t offset;
+	int i, best, prio, bestprio;
+
+	best = -1;
+	bestprio = INT_MAX;
+	for (i = 0; i < N; i++) {
+		sd = &vol->v_subdisks[(no + i) % vol->v_disks_count];
+		offset = off;
+		if (no + i >= vol->v_disks_count)
+			offset += vol->v_strip_size;
+
+		prio = G_RAID_SUBDISK_LOAD(sd);
+		if ((mask & (1 << sd->sd_pos)) != 0)
+			continue;
+		switch (sd->sd_state) {
+		case G_RAID_SUBDISK_S_ACTIVE:
+			break;
+		case G_RAID_SUBDISK_S_RESYNC:
+			if (offset + len <= sd->sd_rebuild_pos)
+				break;
+			/* FALLTHROUGH */
+		case G_RAID_SUBDISK_S_STALE:
+			prio += i << 24;
+			break;
+		case G_RAID_SUBDISK_S_REBUILD:
+			if (offset + len <= sd->sd_rebuild_pos)
+				break;
+			/* FALLTHROUGH */
+		default:
+			continue;
+		}
+		prio += min(sd->sd_recovery, 255) << 16;
+		/* If disk head is precisely in position - highly prefer it. */
+		if (G_RAID_SUBDISK_POS(sd) == offset)
+			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
+		else
+		/* If disk head is close to position - prefer it. */
+		if (ABS(G_RAID_SUBDISK_POS(sd) - offset) <
+		    G_RAID_SUBDISK_TRACK_SIZE)
+			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
+		if (prio < bestprio) {
+			bestprio = prio;
+			best = i;
+		}
+	}
+	return (best);
+}
+
+static void
+g_raid_tr_iostart_raid1e_read(struct g_raid_tr_object *tr, struct bio *bp)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_subdisk *sd;
+	struct bio_queue_head queue;
+	struct bio *cbp;
+	char *addr;
+	off_t offset, start, length, remain;
+	u_int strip_size;
+	int best, no;
+
+	vol = tr->tro_volume;
+	addr = bp->bio_data;
+	strip_size = vol->v_strip_size;
+	V2P(vol, bp->bio_offset, &no, &offset, &start);
+	remain = bp->bio_length;
+	bioq_init(&queue);
+	while (remain > 0) {
+		length = MIN(strip_size - start, remain);
+		best = g_raid_tr_raid1e_select_read_disk(vol,
+		    no, offset, length, 0);
+		KASSERT(best >= 0, ("No readable disk in volume %s!",
+		    vol->v_name));
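+		/* Move to the disk of the selected copy. */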
+		no += best;
+		if (no >= vol->v_disks_count) {
+			no -= vol->v_disks_count;
+			offset += strip_size;
+		}
+		cbp = g_clone_bio(bp);
+		if (cbp == NULL)
+			goto failure;
+		cbp->bio_offset = offset + start;
+		cbp->bio_data = addr;
+		cbp->bio_length = length;
+		cbp->bio_caller1 = &vol->v_subdisks[no];
+		bioq_insert_tail(&queue, cbp);
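+		/* Step past the remaining copies to the next virtual strip. */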
+		no += N - best;
+		if (no >= vol->v_disks_count) {
+			no -= vol->v_disks_count;
+			offset += strip_size;
+		}
+		remain -= length;
+		addr += length;
+		start = 0;
+	}
+	for (cbp = bioq_first(&queue); cbp != NULL;
+	    cbp = bioq_first(&queue)) {
+		bioq_remove(&queue, cbp);
+		sd = cbp->bio_caller1;
+		cbp->bio_caller1 = NULL;
+		g_raid_subdisk_iostart(sd, cbp);
+	}
+	return;
+failure:
+	for (cbp = bioq_first(&queue); cbp != NULL;
+	    cbp = bioq_first(&queue)) {
+		bioq_remove(&queue, cbp);
+		g_destroy_bio(cbp);
+	}
+	if (bp->bio_error == 0)
+		bp->bio_error = ENOMEM;
+	g_raid_iodone(bp, bp->bio_error);
+}
+
+static void
+g_raid_tr_iostart_raid1e_write(struct g_raid_tr_object *tr, struct bio *bp)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_subdisk *sd;
+	struct bio_queue_head queue;
+	struct bio *cbp;
+	char *addr;
+	off_t offset, start, length, remain;
+	u_int strip_size;
+	int i, no;
+
+	vol = tr->tro_volume;
+	addr = bp->bio_data;
+	strip_size = vol->v_strip_size;
+	V2P(vol, bp->bio_offset, &no, &offset, &start);
+	remain = bp->bio_length;
+	bioq_init(&queue);
+	while (remain > 0) {
+		length = MIN(strip_size - start, remain);
+		for (i = 0; i < N; i++) {
+			sd = &vol->v_subdisks[no];
+			switch (sd->sd_state) {
+			case G_RAID_SUBDISK_S_ACTIVE:
+			case G_RAID_SUBDISK_S_STALE:
+			case G_RAID_SUBDISK_S_RESYNC:
+				break;
+			case G_RAID_SUBDISK_S_REBUILD:
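+				/*
+				 * Not rebuilt there yet; skip the write,
+				 * the rebuild pass will copy the data.
+				 */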
+				if (offset + start >= sd->sd_rebuild_pos)
+					goto nextdisk;
+				break;
+			default:
+				goto nextdisk;
+			}
+			cbp = g_clone_bio(bp);
+			if (cbp == NULL)
+				goto failure;
+			cbp->bio_offset = offset + start;
+			cbp->bio_data = addr;
+			cbp->bio_length = length;
+			cbp->bio_caller1 = sd;
+			bioq_insert_tail(&queue, cbp);
+nextdisk:
+			if (++no >= vol->v_disks_count) {
+				no = 0;
+				offset += strip_size;
+			}
+		}
+		remain -= length;
+		addr += length;
+		start = 0;
+	}
+	for (cbp = bioq_first(&queue); cbp != NULL;
+	    cbp = bioq_first(&queue)) {
+		bioq_remove(&queue, cbp);
+		sd = cbp->bio_caller1;
+		cbp->bio_caller1 = NULL;
+		g_raid_subdisk_iostart(sd, cbp);
+	}
+	return;
+failure:
+	for (cbp = bioq_first(&queue); cbp != NULL;
+	    cbp = bioq_first(&queue)) {
+		bioq_remove(&queue, cbp);
+		g_destroy_bio(cbp);
+	}
+	if (bp->bio_error == 0)
+		bp->bio_error = ENOMEM;
+	g_raid_iodone(bp, bp->bio_error);
+}
+
+static void
+g_raid_tr_iostart_raid1e(struct g_raid_tr_object *tr, struct bio *bp)
+{
+	struct g_raid_volume *vol;
+	struct g_raid_tr_raid1e_object *trs;
+
+	vol = tr->tro_volume;
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
+	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
+	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
+		g_raid_iodone(bp, EIO);
+		return;
+	}
+	/*
+	 * If we're rebuilding, squeeze in rebuild activity every so often,
+	 * even when the disk is busy.  Be sure to only count real I/O
+	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
+	 * by this module.
+	 */
+	if (trs->trso_failed_sd != NULL &&
+	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
+		/* Make this new or running now round short. */
+		trs->trso_recover_slabs = 0;
+		if (--trs->trso_fair_io <= 0) {
+			trs->trso_fair_io = g_raid1e_rebuild_fair_io;
+			g_raid_tr_raid1e_rebuild_some(tr);
+		}
+	}
+	switch (bp->bio_cmd) {
+	case BIO_READ:
+		g_raid_tr_iostart_raid1e_read(tr, bp);
+		break;
+	case BIO_WRITE:
+		g_raid_tr_iostart_raid1e_write(tr, bp);
+		break;
+	case BIO_DELETE:
+		g_raid_iodone(bp, EIO);
+		break;
+	case BIO_FLUSH:
+		g_raid_tr_flush_common(tr, bp);
+		break;
+	default:
+		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
+		    bp->bio_cmd, vol->v_name));
+		break;
+	}
+}
+
+static void
+g_raid_tr_iodone_raid1e(struct g_raid_tr_object *tr,
+    struct g_raid_subdisk *sd, struct bio *bp)
+{
+	struct bio *cbp;
+	struct g_raid_subdisk *nsd;
+	struct g_raid_volume *vol;
+	struct bio *pbp;
+	struct g_raid_tr_raid1e_object *trs;
+	uintptr_t *mask;
+	int error, do_write;
+
+	trs = (struct g_raid_tr_raid1e_object *)tr;
+	vol = tr->tro_volume;
+	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
+		if (trs->trso_type == TR_RAID1E_REBUILD) {
+			if (bp->bio_cmd == BIO_READ) {
+				/* Immediately abort rebuild, if requested. */
+				if (trs->trso_flags & TR_RAID1E_F_ABORT) {
+					trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME;
+					g_raid_tr_raid1e_rebuild_abort(tr);
+					return;
+				}
+
+				/* On read error, skip and cross fingers. */
+				if (bp->bio_error != 0) {
+					G_RAID_LOGREQ(0, bp,
+					    "Read error during rebuild (%d), "
+					    "possible data loss!",
+					    bp->bio_error);
+					goto rebuild_round_done;
+				}
+
+				/*
+				 * The read operation finished, queue the
+				 * write and get out.
+				 */
+				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
+				    bp->bio_error);
+				bp->bio_cmd = BIO_WRITE;
+				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
+				/* Offset and length are reused from the read. */
+				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
+				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
+			} else {
+				/*
+				 * The write operation just finished.  Do
+				 * another.  We keep cloning the master bio
+				 * since it has the right buffers allocated to
+				 * it.
+				 */
+				G_RAID_LOGREQ(4, bp,
+				    "rebuild write done. Error %d",
+				    bp->bio_error);
+				nsd = trs->trso_failed_sd;
+				if (bp->bio_error != 0 ||
+				    trs->trso_flags & TR_RAID1E_F_ABORT) {
+					if ((trs->trso_flags &
+					    TR_RAID1E_F_ABORT) == 0) {
+						g_raid_tr_raid1e_fail_disk(sd->sd_softc,
+						    nsd, nsd->sd_disk);
+					}
+					trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME;
+					g_raid_tr_raid1e_rebuild_abort(tr);
+					return;
+				}
+rebuild_round_done:
+				nsd = trs->trso_failed_sd;
+				trs->trso_flags &= ~TR_RAID1E_F_LOCKED;
+				g_raid_unlock_range(sd->sd_volume,
+				    bp->bio_offset, bp->bio_length);
+				nsd->sd_rebuild_pos += bp->bio_length;
+				if (nsd->sd_rebuild_pos >= vol->v_mediasize) {
+					g_raid_tr_raid1e_rebuild_finish(tr);
+					return;
+				}
+
+				/* Abort rebuild if we are stopping */
+				if (trs->trso_stopping) {
+					trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME;
+					g_raid_tr_raid1e_rebuild_abort(tr);
+					return;
+				}
+
+				if (--trs->trso_meta_update <= 0) {
+					g_raid_write_metadata(vol->v_softc,
+					    vol, nsd, nsd->sd_disk);
+					trs->trso_meta_update =
+					    g_raid1e_rebuild_meta_update;
+				}
+				trs->trso_flags &= ~TR_RAID1E_F_DOING_SOME;
+				if (--trs->trso_recover_slabs <= 0)
+					return;
+				/* Run next rebuild iteration. */
+				g_raid_tr_raid1e_rebuild_some(tr);
+			}
+		} else if (trs->trso_type == TR_RAID1E_RESYNC) {
+			/*
+			 * read good sd, read bad sd in parallel.  when both
+			 * done, compare the buffers.  write good to the bad
+			 * if different.  do the next bit of work.
+			 */
+			panic("Somehow, we think we're doing a resync");
+		}
+		return;
+	}
+	pbp = bp->bio_parent;
+	pbp->bio_inbed++;
+	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
+		/*
+		 * Read failed on first drive.  Retry the read error on
+		 * another disk drive, if available, before erroring out the
+		 * read.
+		 */
+		sd->sd_disk->d_read_errs++;
+		G_RAID_LOGREQ(0, bp,
+		    "Read error (%d), %d read errors total",
+		    bp->bio_error, sd->sd_disk->d_read_errs);
+
+		/*
+		 * If there are too many read errors, we move to degraded.
+		 * XXX Do we want to FAIL the drive (eg, make the user redo
+		 * everything to get it back in sync), or just degrade the
+		 * drive, which kicks off a resync?
+		 */
+		do_write = 1;
+		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***