Date:      Wed, 13 Mar 2019 20:28:07 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-11@freebsd.org
Subject:   svn commit: r345114 - stable/11/sys/cam/ctl
Message-ID:  <201903132028.x2DKS7m1040314@repo.freebsd.org>

Author: mav
Date: Wed Mar 13 20:28:07 2019
New Revision: 345114
URL: https://svnweb.freebsd.org/changeset/base/345114

Log:
  MFC r344636: Refactor command ordering/blocking mechanism in CTL.
  
  Replace the long per-LUN queue of blocked commands, which was scanned on
  every command completion (and sometimes even twice), giving up to O(n^2)
  processing cost, with much shorter per-command blocked queues that are
  scanned only when the respective blocking command completes, and check
  only the commands ahead of the previous blocker, reducing the cost to
  O(n).
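  
  A minimal illustration of the new shape (a simplified sketch only; the
  real structures and field names are in the ctl_io.h and ctl.c hunks
  below):
  
  #include <sys/queue.h>
  
  /* Each in-flight I/O now carries its own blocking state. */
  struct io_hdr {
          struct io_hdr           *blocker;       /* I/O blocking this one */
          TAILQ_HEAD(, io_hdr)     blocked_queue; /* I/Os blocked by this one */
          TAILQ_ENTRY(io_hdr)      blocked_links; /* linkage on a blocker's queue */
  };
  
  /*
   * When 'bio' completes, only the commands it was blocking get
   * re-checked, instead of rescanning one long per-LUN queue.
   */
  static void
  unblock_others(struct io_hdr *bio)
  {
          struct io_hdr *io, *next;
  
          TAILQ_FOREACH_SAFE(io, &bio->blocked_queue, blocked_links, next) {
                  /* re-run the serialization check for 'io' only */
          }
  }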
  
  While there, unblock aborted commands so that they "complete" ASAP and
  get removed from the OOA queue, instead of wasting time ordering other
  commands against them.  Aborted commands that have not yet been sent for
  execution should have no visible side effects, so this is a safe and
  easy optimization now, unlike commands already in processing, which
  remain a pain.
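  
  The early-out itself is tiny; it mirrors the ctl_check_for_blockage()
  hunk in the diff below:
  
          /* Aborted commands need no ordering; let them complete ASAP. */
          if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
                  return (CTL_ACTION_SKIP);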
  
  Together these two optimizations should fix a rather pathological case
  in which, due to backend slowness, CTL accumulated many thousands of
  blocked requests, partially aborted by the initiator and thus supposedly
  no longer existing, yet still wasting CTL CPU time.

Modified:
  stable/11/sys/cam/ctl/ctl.c
  stable/11/sys/cam/ctl/ctl_frontend_ioctl.c
  stable/11/sys/cam/ctl/ctl_io.h
  stable/11/sys/cam/ctl/ctl_private.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/cam/ctl/ctl.c
==============================================================================
--- stable/11/sys/cam/ctl/ctl.c	Wed Mar 13 20:27:48 2019	(r345113)
+++ stable/11/sys/cam/ctl/ctl.c	Wed Mar 13 20:28:07 2019	(r345114)
@@ -496,8 +496,11 @@ static ctl_action ctl_extent_check_seq(union ctl_io *i
 static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
     union ctl_io *pending_io, union ctl_io *ooa_io);
 static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
-				union ctl_io *starting_io);
-static int ctl_check_blocked(struct ctl_lun *lun);
+				union ctl_io **starting_io);
+static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
+    bool skip);
+static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
+    bool skip);
 static int ctl_scsiio_lun_check(struct ctl_lun *lun,
 				const struct ctl_cmd_entry *entry,
 				struct ctl_scsiio *ctsio);
@@ -2274,6 +2277,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
 	union ctl_ha_msg msg_info;
 	struct ctl_lun *lun;
 	const struct ctl_cmd_entry *entry;
+	union ctl_io *bio;
 	uint32_t targ_lun;
 
 	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
@@ -2332,12 +2336,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
 #endif
 	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
 
-	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
-		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
-		 ooa_links))) {
+	bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
+	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
 	case CTL_ACTION_BLOCK:
-		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
-		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+		ctsio->io_hdr.blocker = bio;
+		TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
 				  blocked_links);
 		mtx_unlock(&lun->lun_lock);
 		break;
@@ -2419,7 +2422,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_
 #endif
 		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
 		entry->cdb_len = io->scsiio.cdb_len;
-		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
+		if (io->io_hdr.blocker != NULL)
 			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
 
 		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
@@ -3914,6 +3917,7 @@ ctl_alloc_io(void *pool_ref)
 	if (io != NULL) {
 		io->io_hdr.pool = pool_ref;
 		CTL_SOFTC(io) = pool->ctl_softc;
+		TAILQ_INIT(&io->io_hdr.blocked_queue);
 	}
 	return (io);
 }
@@ -3928,6 +3932,7 @@ ctl_alloc_io_nowait(void *pool_ref)
 	if (io != NULL) {
 		io->io_hdr.pool = pool_ref;
 		CTL_SOFTC(io) = pool->ctl_softc;
+		TAILQ_INIT(&io->io_hdr.blocked_queue);
 	}
 	return (io);
 }
@@ -3959,6 +3964,7 @@ ctl_zero_io(union ctl_io *io)
 	memset(io, 0, sizeof(*io));
 	io->io_hdr.pool = pool;
 	CTL_SOFTC(io) = pool->ctl_softc;
+	TAILQ_INIT(&io->io_hdr.blocked_queue);
 }
 
 int
@@ -4719,7 +4725,6 @@ fail:
 	lun->last_busy = getsbinuptime();
 #endif
 	TAILQ_INIT(&lun->ooa_queue);
-	TAILQ_INIT(&lun->blocked_queue);
 	STAILQ_INIT(&lun->error_list);
 	lun->ie_reported = 1;
 	callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
@@ -5906,7 +5911,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
 	ptrlen->ptr = (void *)buf;
 	ptrlen->len = len;
 	ptrlen->flags = byte2;
-	ctl_check_blocked(lun);
+	ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE);
 	mtx_unlock(&lun->lun_lock);
 
 	retval = lun->backend->config_write((union ctl_io *)ctsio);
@@ -10793,6 +10798,14 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_
 	const ctl_serialize_action *serialize_row;
 
 	/*
+	 * Aborted commands are not going to be executed and may even
+	 * not report completion, so we don't care about their order.
+	 * Let them complete ASAP to clean the OOA queue.
+	 */
+	if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
+		return (CTL_ACTION_SKIP);
+
+	/*
 	 * The initiator attempted multiple untagged commands at the same
 	 * time.  Can't do that.
 	 */
@@ -10922,7 +10935,7 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_
  */
 static ctl_action
 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
-	      union ctl_io *starting_io)
+	      union ctl_io **starting_io)
 {
 	union ctl_io *ooa_io;
 	ctl_action action;
@@ -10935,150 +10948,152 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pendi
 	 * queue.  If starting_io is NULL, we'll just end up returning
 	 * CTL_ACTION_PASS.
 	 */
-	for (ooa_io = starting_io; ooa_io != NULL;
+	for (ooa_io = *starting_io; ooa_io != NULL;
 	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
 	     ooa_links)){
-
-		/*
-		 * This routine just checks to see whether
-		 * cur_blocked is blocked by ooa_io, which is ahead
-		 * of it in the queue.  It doesn't queue/dequeue
-		 * cur_blocked.
-		 */
 		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
-		switch (action) {
-		case CTL_ACTION_BLOCK:
-		case CTL_ACTION_OVERLAP:
-		case CTL_ACTION_OVERLAP_TAG:
-		case CTL_ACTION_SKIP:
-		case CTL_ACTION_ERROR:
+		if (action != CTL_ACTION_PASS) {
+			*starting_io = ooa_io;
 			return (action);
-			break; /* NOTREACHED */
-		case CTL_ACTION_PASS:
-			break;
-		default:
-			panic("%s: Invalid action %d\n", __func__, action);
 		}
 	}
 
+	*starting_io = NULL;
 	return (CTL_ACTION_PASS);
 }
 
 /*
- * Assumptions:
- * - An I/O has just completed, and has been removed from the per-LUN OOA
- *   queue, so some items on the blocked queue may now be unblocked.
+ * Try to unblock the specified I/O.
+ *
+ * skip parameter allows explicitly skip present blocker of the I/O,
+ * starting from the previous one on OOA queue.  It can be used when
+ * we know for sure that the blocker I/O does no longer count.
  */
-static int
-ctl_check_blocked(struct ctl_lun *lun)
+static void
+ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
 {
 	struct ctl_softc *softc = lun->ctl_softc;
-	union ctl_io *cur_blocked, *next_blocked;
+	union ctl_io *bio, *obio;
+	const struct ctl_cmd_entry *entry;
+	union ctl_ha_msg msg_info;
+	ctl_action action;
 
 	mtx_assert(&lun->lun_lock, MA_OWNED);
 
-	/*
-	 * Run forward from the head of the blocked queue, checking each
-	 * entry against the I/Os prior to it on the OOA queue to see if
-	 * there is still any blockage.
-	 *
-	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
-	 * with our removing a variable on it while it is traversing the
-	 * list.
-	 */
-	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
-	     cur_blocked != NULL; cur_blocked = next_blocked) {
-		union ctl_io *prev_ooa;
-		ctl_action action;
+	if (io->io_hdr.blocker == NULL)
+		return;
 
-		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
-							  blocked_links);
+	obio = bio = io->io_hdr.blocker;
+	if (skip)
+		bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq,
+		    ooa_links);
+	action = ctl_check_ooa(lun, io, &bio);
+	if (action == CTL_ACTION_BLOCK) {
+		/* Still blocked, but may be by different I/O now. */
+		if (bio != obio) {
+			TAILQ_REMOVE(&obio->io_hdr.blocked_queue,
+			    &io->io_hdr, blocked_links);
+			TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue,
+			    &io->io_hdr, blocked_links);
+			io->io_hdr.blocker = bio;
+		}
+		return;
+	}
 
-		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
-						      ctl_ooaq, ooa_links);
+	/* No longer blocked, one way or another. */
+	TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links);
+	io->io_hdr.blocker = NULL;
 
+	switch (action) {
+	case CTL_ACTION_OVERLAP:
+		ctl_set_overlapped_cmd(&io->scsiio);
+		goto error;
+	case CTL_ACTION_OVERLAP_TAG:
+		ctl_set_overlapped_tag(&io->scsiio,
+		    io->scsiio.tag_num & 0xff);
+		goto error;
+	case CTL_ACTION_PASS:
+	case CTL_ACTION_SKIP:
+
+		/* Serializing commands from the other SC retire there. */
+		if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
+		    (softc->ha_mode != CTL_HA_MODE_XFER)) {
+			io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+			msg_info.hdr.original_sc = io->io_hdr.remote_io;
+			msg_info.hdr.serializing_sc = io;
+			msg_info.hdr.msg_type = CTL_MSG_R2R;
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+			    sizeof(msg_info.hdr), M_NOWAIT);
+			break;
+		}
+
 		/*
-		 * If cur_blocked happens to be the first item in the OOA
-		 * queue now, prev_ooa will be NULL, and the action
-		 * returned will just be CTL_ACTION_PASS.
+		 * Check this I/O for LUN state changes that may have happened
+		 * while this command was blocked. The LUN state may have been
+		 * changed by a command ahead of us in the queue.
 		 */
-		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
-
-		switch (action) {
-		case CTL_ACTION_BLOCK:
-			/* Nothing to do here, still blocked */
+		entry = ctl_get_cmd_entry(&io->scsiio, NULL);
+		if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
+			ctl_done(io);
 			break;
-		case CTL_ACTION_OVERLAP:
-		case CTL_ACTION_OVERLAP_TAG:
-			/*
-			 * This shouldn't happen!  In theory we've already
-			 * checked this command for overlap...
-			 */
-			break;
-		case CTL_ACTION_PASS:
-		case CTL_ACTION_SKIP: {
-			const struct ctl_cmd_entry *entry;
+		}
 
-			/*
-			 * The skip case shouldn't happen, this transaction
-			 * should have never made it onto the blocked queue.
-			 */
-			/*
-			 * This I/O is no longer blocked, we can remove it
-			 * from the blocked queue.  Since this is a TAILQ
-			 * (doubly linked list), we can do O(1) removals
-			 * from any place on the list.
-			 */
-			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
-				     blocked_links);
-			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
+		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+		ctl_enqueue_rtr(io);
+		break;
+	case CTL_ACTION_ERROR:
+	default:
+		ctl_set_internal_failure(&io->scsiio,
+					 /*sks_valid*/ 0,
+					 /*retry_count*/ 0);
 
-			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
-			    (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
-				/*
-				 * Need to send IO back to original side to
-				 * run
-				 */
-				union ctl_ha_msg msg_info;
+error:
+		/* Serializing commands from the other SC are done here. */
+		if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
+		    (softc->ha_mode != CTL_HA_MODE_XFER)) {
+			ctl_try_unblock_others(lun, io, TRUE);
+			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
 
-				cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
-				msg_info.hdr.original_sc =
-					cur_blocked->io_hdr.remote_io;
-				msg_info.hdr.serializing_sc = cur_blocked;
-				msg_info.hdr.msg_type = CTL_MSG_R2R;
-				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
-				    sizeof(msg_info.hdr), M_NOWAIT);
-				break;
-			}
-			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
-
-			/*
-			 * Check this I/O for LUN state changes that may
-			 * have happened while this command was blocked.
-			 * The LUN state may have been changed by a command
-			 * ahead of us in the queue, so we need to re-check
-			 * for any states that can be caused by SCSI
-			 * commands.
-			 */
-			if (ctl_scsiio_lun_check(lun, entry,
-						 &cur_blocked->scsiio) == 0) {
-				cur_blocked->io_hdr.flags |=
-				                      CTL_FLAG_IS_WAS_ON_RTR;
-				ctl_enqueue_rtr(cur_blocked);
-			} else
-				ctl_done(cur_blocked);
+			ctl_copy_sense_data_back(io, &msg_info);
+			msg_info.hdr.original_sc = io->io_hdr.remote_io;
+			msg_info.hdr.serializing_sc = NULL;
+			msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+			    sizeof(msg_info.scsi), M_WAITOK);
+			ctl_free_io(io);
 			break;
 		}
-		default:
-			/*
-			 * This probably shouldn't happen -- we shouldn't
-			 * get CTL_ACTION_ERROR, or anything else.
-			 */
-			break;
-		}
+
+		ctl_done(io);
+		break;
 	}
+}
 
-	return (CTL_RETVAL_COMPLETE);
+/*
+ * Try to unblock I/Os blocked by the specified I/O.
+ *
+ * skip parameter allows explicitly skip the specified I/O as blocker,
+ * starting from the previous one on the OOA queue.  It can be used when
+ * we know for sure that the specified I/O does no longer count (done).
+ * It has to be still on OOA queue though so that we know where to start.
+ */
+static void
+ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip)
+{
+	union ctl_io *io, *next_io;
+
+	mtx_assert(&lun->lun_lock, MA_OWNED);
+
+	for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue);
+	     io != NULL; io = next_io) {
+		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links);
+
+		KASSERT(io->io_hdr.blocker != NULL,
+		    ("I/O %p on blocked list without blocker", io));
+		ctl_try_unblock_io(lun, io, skip);
+	}
+	KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue),
+	    ("blocked_queue is not empty after skipping %p", bio));
 }
 
 /*
@@ -11246,6 +11261,8 @@ ctl_failover_lun(union ctl_io *rio)
 				if (io->flags & CTL_FLAG_IO_ACTIVE) {
 					io->flags |= CTL_FLAG_ABORT;
 					io->flags |= CTL_FLAG_FAILOVER;
+					ctl_try_unblock_io(lun,
+					    (union ctl_io *)io, FALSE);
 				} else { /* This can be only due to DATAMOVE */
 					io->msg_type = CTL_MSG_DATAMOVE_DONE;
 					io->flags &= ~CTL_FLAG_DMA_INPROG;
@@ -11253,7 +11270,7 @@ ctl_failover_lun(union ctl_io *rio)
 					io->port_status = 31340;
 					ctl_enqueue_isc((union ctl_io *)io);
 				}
-			}
+			} else
 			/* We are slave */
 			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
 				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
@@ -11267,23 +11284,19 @@ ctl_failover_lun(union ctl_io *rio)
 			}
 		}
 	} else { /* SERIALIZE modes */
-		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
-		    next_io) {
-			/* We are master */
-			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
-				TAILQ_REMOVE(&lun->blocked_queue, io,
-				    blocked_links);
-				io->flags &= ~CTL_FLAG_BLOCKED;
-				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
-				ctl_free_io((union ctl_io *)io);
-			}
-		}
 		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
 			/* We are master */
 			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+				if (io->blocker != NULL) {
+					TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue,
+					    io, blocked_links);
+					io->blocker = NULL;
+				}
+				ctl_try_unblock_others(lun, (union ctl_io *)io,
+				    TRUE);
 				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
 				ctl_free_io((union ctl_io *)io);
-			}
+			} else
 			/* We are slave */
 			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
 				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
@@ -11294,7 +11307,6 @@ ctl_failover_lun(union ctl_io *rio)
 				}
 			}
 		}
-		ctl_check_blocked(lun);
 	}
 	mtx_unlock(&lun->lun_lock);
 }
@@ -11304,6 +11316,7 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ct
 {
 	struct ctl_lun *lun;
 	const struct ctl_cmd_entry *entry;
+	union ctl_io *bio;
 	uint32_t initidx, targ_lun;
 	int retval = 0;
 
@@ -11479,12 +11492,11 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ct
 		return (retval);
 	}
 
-	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
-			      (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
-			      ctl_ooaq, ooa_links))) {
+	bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
+	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
 	case CTL_ACTION_BLOCK:
-		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
-		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+		ctsio->io_hdr.blocker = bio;
+		TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
 				  blocked_links);
 		mtx_unlock(&lun->lun_lock);
 		return (retval);
@@ -11697,6 +11709,7 @@ ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx
 	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
 	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
 		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
+		ctl_try_unblock_io(lun, xio, FALSE);
 	}
 	/* Clear CA. */
 	for (i = 0; i < ctl_max_ports; i++) {
@@ -11795,6 +11808,7 @@ ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ
 				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
 				    sizeof(msg_info.task), M_NOWAIT);
 			}
+			ctl_try_unblock_io(lun, xio, FALSE);
 		}
 	}
 }
@@ -11967,6 +11981,7 @@ ctl_abort_task(union ctl_io *io)
 				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
 				    sizeof(msg_info.task), M_NOWAIT);
 			}
+			ctl_try_unblock_io(lun, xio, FALSE);
 		}
 	}
 	mtx_unlock(&lun->lun_lock);
@@ -12142,8 +12157,8 @@ ctl_handle_isc(union ctl_io *io)
 			break;
 		}
 		mtx_lock(&lun->lun_lock);
+		ctl_try_unblock_others(lun, io, TRUE);
 		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
-		ctl_check_blocked(lun);
 		mtx_unlock(&lun->lun_lock);
 		ctl_free_io(io);
 		break;
@@ -12982,6 +12997,13 @@ ctl_process_done(union ctl_io *io)
 	}
 
 	/*
+	 * Run through the blocked queue of this I/O and see if anything
+	 * can be unblocked, now that this I/O is done and will be removed.
+	 * We need to do it before removal to have OOA position to start.
+	 */
+	ctl_try_unblock_others(lun, io, TRUE);
+
+	/*
 	 * Remove this from the OOA queue.
 	 */
 	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
@@ -12991,12 +13013,6 @@ ctl_process_done(union ctl_io *io)
 #endif
 
 	/*
-	 * Run through the blocked queue on this LUN and see if anything
-	 * has become unblocked, now that this transaction is done.
-	 */
-	ctl_check_blocked(lun);
-
-	/*
 	 * If the LUN has been invalidated, free it if there is nothing
 	 * left on its OOA queue.
 	 */
@@ -13151,7 +13167,7 @@ ctl_serseq_done(union ctl_io *io)
 		return;
 	mtx_lock(&lun->lun_lock);
 	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
-	ctl_check_blocked(lun);
+	ctl_try_unblock_others(lun, io, FALSE);
 	mtx_unlock(&lun->lun_lock);
 }
 

Modified: stable/11/sys/cam/ctl/ctl_frontend_ioctl.c
==============================================================================
--- stable/11/sys/cam/ctl/ctl_frontend_ioctl.c	Wed Mar 13 20:27:48 2019	(r345113)
+++ stable/11/sys/cam/ctl/ctl_frontend_ioctl.c	Wed Mar 13 20:28:07 2019	(r345114)
@@ -413,6 +413,7 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t add
 	memcpy(io, (void *)addr, sizeof(*io));
 	io->io_hdr.pool = pool_tmp;
 	CTL_SOFTC(io) = sc_tmp;
+	TAILQ_INIT(&io->io_hdr.blocked_queue);
 
 	/*
 	 * No status yet, so make sure the status is set properly.

Modified: stable/11/sys/cam/ctl/ctl_io.h
==============================================================================
--- stable/11/sys/cam/ctl/ctl_io.h	Wed Mar 13 20:27:48 2019	(r345113)
+++ stable/11/sys/cam/ctl/ctl_io.h	Wed Mar 13 20:28:07 2019	(r345114)
@@ -85,7 +85,6 @@ typedef enum {
 	CTL_FLAG_DO_AUTOSENSE	= 0x00000020,	/* grab sense info */
 	CTL_FLAG_USER_REQ	= 0x00000040,	/* request came from userland */
 	CTL_FLAG_ALLOCATED	= 0x00000100,	/* data space allocated */
-	CTL_FLAG_BLOCKED	= 0x00000200,	/* on the blocked queue */
 	CTL_FLAG_ABORT_STATUS	= 0x00000400,	/* return TASK ABORTED status */
 	CTL_FLAG_ABORT		= 0x00000800,	/* this I/O should be aborted */
 	CTL_FLAG_DMA_INPROG	= 0x00001000,	/* DMA in progress */
@@ -237,14 +236,13 @@ struct ctl_io_hdr {
 #endif /* CTL_TIME_IO */
 	uint32_t	  num_dmas;	/* Number of DMAs */
 	union ctl_io	  *remote_io;	/* I/O counterpart on remote HA side */
-	void		  *pad1;
+	union ctl_io	  *blocker;	/* I/O blocking this one */
 	void		  *pool;	/* I/O pool */
 	union ctl_priv	  ctl_private[CTL_NUM_PRIV];/* CTL private area */
-	void		  *pad2;
-	void		  *pad3;
+	TAILQ_HEAD(, ctl_io_hdr) blocked_queue;	/* I/Os blocked by this one */
 	STAILQ_ENTRY(ctl_io_hdr) links;	/* linked list pointer */
-	TAILQ_ENTRY(ctl_io_hdr) ooa_links;
-	TAILQ_ENTRY(ctl_io_hdr) blocked_links;
+	TAILQ_ENTRY(ctl_io_hdr) ooa_links;	/* ooa_queue links */
+	TAILQ_ENTRY(ctl_io_hdr) blocked_links;	/* blocked_queue links */
 };
 
 typedef enum {

Modified: stable/11/sys/cam/ctl/ctl_private.h
==============================================================================
--- stable/11/sys/cam/ctl/ctl_private.h	Wed Mar 13 20:27:48 2019	(r345113)
+++ stable/11/sys/cam/ctl/ctl_private.h	Wed Mar 13 20:28:07 2019	(r345114)
@@ -388,7 +388,6 @@ struct ctl_lun {
 	sbintime_t			last_busy;
 #endif
 	TAILQ_HEAD(ctl_ooaq, ctl_io_hdr)  ooa_queue;
-	TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
 	STAILQ_ENTRY(ctl_lun)		links;
 	struct scsi_sense_data		**pending_sense;
 	ctl_ua_type			**pending_ua;


