Date:      Sat, 12 Jul 2014 04:34:39 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r268556 - stable/10/sys/cam/ctl
Message-ID:  <201407120434.s6C4Ydjm008784@svn.freebsd.org>

Author: mav
Date: Sat Jul 12 04:34:39 2014
New Revision: 268556
URL: http://svnweb.freebsd.org/changeset/base/268556

Log:
  MFC r267643, r267873, r268391, r268398:
  Introduce fine-grained CTL locking to improve SMP scalability.
  
  Split the global ctl_lock, which historically protected most of the CTL
  context:
   - the remaining ctl_lock now protects the lists of frontends and backends;
   - per-LUN lun_lock(s) protect LUN-specific information;
   - per-thread queue_lock(s) protect request queues.
  This radically reduces congestion on ctl_lock; the data-structure sketch
  below shows the resulting layout.
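
  To make the new layout concrete, here is a minimal sketch of how the split
  data structures fit together after this change.  It is inferred from the
  hunks below (the real definitions live in ctl_private.h, which this mail
  truncates), so the exact field types and ordering are approximate:

	/*
	 * The global softc's ctl_lock now guards only the frontend/backend
	 * lists; each worker thread owns its request queues outright.
	 */
	struct ctl_thread {
		struct mtx		  queue_lock;	/* protects the 4 queues below */
		struct ctl_softc	 *ctl_softc;	/* back pointer */
		struct thread		 *thread;
		STAILQ_HEAD(, ctl_io_hdr) incoming_queue;
		STAILQ_HEAD(, ctl_io_hdr) rtr_queue;	/* "ready to run" */
		STAILQ_HEAD(, ctl_io_hdr) done_queue;
		STAILQ_HEAD(, ctl_io_hdr) isc_queue;	/* inter-SC messages */
	};

	struct ctl_lun {
		struct mtx	lun_lock;	/* all LUN-local state */
		/* ... existing fields unchanged ... */
	};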
  
  Create multiple worker threads, their number depending on the number of
  CPUs, and assign each LUN to one of them.  This spreads the load across
  multiple CPUs while still avoiding congestion on the queue and LUN locks;
  a hypothetical enqueue path is sketched below.
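
  As an illustration, the enqueue path probably looks something like the
  following.  The ctl_enqueue_*() bodies fall past this mail's truncation
  point, so the LUN-to-thread mapping shown here (simple modulo) is an
  assumption, though it is consistent with the declarations and the
  targ_mapped_lun field visible in the hunks:

	static void
	ctl_enqueue_incoming(union ctl_io *io)
	{
		struct ctl_softc *softc = control_softc;
		struct ctl_thread *thr;

		/* Assumed mapping: each LUN sticks to one worker. */
		thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun %
		    worker_threads];

		/*
		 * Only this worker's queue_lock is taken; the global
		 * ctl_lock stays off the per-I/O hot path entirely.
		 */
		mtx_lock(&thr->queue_lock);
		STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
		mtx_unlock(&thr->queue_lock);
		wakeup(thr);	/* each worker sleeps on its own ctl_thread */
	}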
  
  On a 40-core server exporting 5 LUNs, each backed by a gstripe of SATA
  SSDs and accessed via 6 iSCSI connections, this change improves the peak
  request rate from 250K to 680K IOPS.
  
  Sponsored by:	iXsystems, Inc.

Modified:
  stable/10/sys/cam/ctl/ctl.c
  stable/10/sys/cam/ctl/ctl.h
  stable/10/sys/cam/ctl/ctl_io.h
  stable/10/sys/cam/ctl/ctl_private.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/cam/ctl/ctl.c
==============================================================================
--- stable/10/sys/cam/ctl/ctl.c	Sat Jul 12 02:40:25 2014	(r268555)
+++ stable/10/sys/cam/ctl/ctl.c	Sat Jul 12 04:34:39 2014	(r268556)
@@ -83,14 +83,6 @@ __FBSDID("$FreeBSD$");
 struct ctl_softc *control_softc = NULL;
 
 /*
- * The default is to run with CTL_DONE_THREAD turned on.  Completed
- * transactions are queued for processing by the CTL work thread.  When
- * CTL_DONE_THREAD is not defined, completed transactions are processed in
- * the caller's context.
- */
-#define CTL_DONE_THREAD
-
-/*
  * Use the serial number and device ID provided by the backend, rather than
  * making up our own.
  */
@@ -321,7 +313,7 @@ static int     ctl_is_single = 1;
 static int     index_to_aps_page;
 
 SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
-static int worker_threads = 1;
+static int worker_threads = -1;
 TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
 SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
     &worker_threads, 1, "Number of worker threads");
@@ -350,7 +342,7 @@ static int ctl_ioctl_targ_disable(void *
 static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
 static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
 static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
-static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
+static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
 static int ctl_ioctl_submit_wait(union ctl_io *io);
 static void ctl_ioctl_datamove(union ctl_io *io);
 static void ctl_ioctl_done(union ctl_io *io);
@@ -420,7 +412,7 @@ static int ctl_target_reset(struct ctl_s
 static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
 			 ctl_ua_type ua_type);
 static int ctl_abort_task(union ctl_io *io);
-static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
+static void ctl_run_task(union ctl_io *io);
 #ifdef CTL_IO_DELAY
 static void ctl_datamove_timer_wakeup(void *arg);
 static void ctl_done_timer_wakeup(void *arg);
@@ -437,8 +429,13 @@ static int ctl_datamove_remote_xfer(unio
 				    ctl_ha_dt_cb callback);
 static void ctl_datamove_remote_read(union ctl_io *io);
 static void ctl_datamove_remote(union ctl_io *io);
-static int ctl_process_done(union ctl_io *io, int have_lock);
+static int ctl_process_done(union ctl_io *io);
+static void ctl_lun_thread(void *arg);
 static void ctl_work_thread(void *arg);
+static void ctl_enqueue_incoming(union ctl_io *io);
+static void ctl_enqueue_rtr(union ctl_io *io);
+static void ctl_enqueue_done(union ctl_io *io);
+static void ctl_enqueue_isc(union ctl_io *io);
 
 /*
  * Load the serialization table.  This isn't very pretty, but is probably
@@ -497,8 +494,7 @@ ctl_isc_handler_finish_xfer(struct ctl_s
 	       sizeof(ctsio->sense_data));
 	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
 	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
-	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
-	ctl_wakeup_thread();
+	ctl_enqueue_isc((union ctl_io *)ctsio);
 }
 
 static void
@@ -544,8 +540,7 @@ ctl_isc_handler_finish_ser_only(struct c
 	}
 #endif
 	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
-	STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
-	ctl_wakeup_thread();
+	ctl_enqueue_isc((union ctl_io *)ctsio);
 }
 
 /*
@@ -580,7 +575,6 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			       isc_status);
 			return;
 		}
-		mtx_lock(&ctl_softc->ctl_lock);
 
 		switch (msg_info.hdr.msg_type) {
 		case CTL_MSG_SERIALIZE:
@@ -593,7 +587,6 @@ ctl_isc_event_handler(ctl_ha_channel cha
 				       "ctl_io!\n");
 				/* Bad Juju */
 				/* Need to set busy and send msg back */
-				mtx_unlock(&ctl_softc->ctl_lock);
 				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
 				msg_info.hdr.status = CTL_SCSI_ERROR;
 				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
@@ -644,9 +637,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 				io->io_hdr.flags |=
 					entry->flags & CTL_FLAG_DATA_MASK;
 			}
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 
 		/* Performed on the Originating SC, XFER mode only */
@@ -750,11 +741,8 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			 * the full S/G list.  Queue processing in the thread.
 			 * Otherwise wait for the next piece.
 			 */
-			if (msg_info.dt.sg_last != 0) {
-				STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-						   &io->io_hdr, links);
-				ctl_wakeup_thread();
-			}
+			if (msg_info.dt.sg_last != 0)
+				ctl_enqueue_isc(io);
 			break;
 		}
 		/* Performed on the Serializing (primary) SC, XFER mode only */
@@ -780,10 +768,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			io->scsiio.residual = msg_info.scsi.residual;
 			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
 			       sizeof(io->scsiio.sense_data));
-
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 		}
 
@@ -792,7 +777,6 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			io = msg_info.hdr.original_sc;
 			if (io == NULL) {
 				printf("%s: Major Bummer\n", __func__);
-				mtx_unlock(&ctl_softc->ctl_lock);
 				return;
 			} else {
 #if 0
@@ -801,9 +785,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			}
 			io->io_hdr.msg_type = CTL_MSG_R2R;
 			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
-			STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 
 		/*
@@ -840,9 +822,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 
 			/* io = msg_info.hdr.serializing_sc; */
 			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
-		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &io->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc(io);
 			break;
 
 		/* Handle resets sent from the other side */
@@ -856,7 +836,6 @@ ctl_isc_event_handler(ctl_ha_channel cha
 				/* Bad Juju */
 				/* should I just call the proper reset func
 				   here??? */
-				mtx_unlock(&ctl_softc->ctl_lock);
 				goto bailout;
 			}
 			ctl_zero_io((union ctl_io *)taskio);
@@ -873,10 +852,7 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			cs_prof_gettime(&taskio->io_hdr.start_ticks);
 #endif
 #endif /* CTL_TIME_IO */
-		        STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
-					   &taskio->io_hdr, links);
-			ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
-			ctl_wakeup_thread();
+			ctl_run_task((union ctl_io *)taskio);
 			break;
 		}
 		/* Persistent Reserve action which needs attention */
@@ -888,15 +864,12 @@ ctl_isc_event_handler(ctl_ha_channel cha
 				       "ctl_io!\n");
 				/* Bad Juju */
 				/* Need to set busy and send msg back */
-				mtx_unlock(&ctl_softc->ctl_lock);
 				goto bailout;
 			}
 			ctl_zero_io((union ctl_io *)presio);
 			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
 			presio->pr_msg = msg_info.pr;
-		        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
-					   &presio->io_hdr, links);
-			ctl_wakeup_thread();
+			ctl_enqueue_isc((union ctl_io *)presio);
 			break;
 		case CTL_MSG_SYNC_FE:
 			rcv_sync_msg = 1;
@@ -909,23 +882,21 @@ ctl_isc_event_handler(ctl_ha_channel cha
 			struct copan_aps_subpage *current_sp;
 			uint32_t targ_lun;
 
-			targ_lun = msg_info.hdr.nexus.targ_lun;
-			if (msg_info.hdr.nexus.lun_map_fn != NULL)
-				targ_lun = msg_info.hdr.nexus.lun_map_fn(msg_info.hdr.nexus.lun_map_arg, targ_lun);
-
+			targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
 			lun = ctl_softc->ctl_luns[targ_lun];
+			mtx_lock(&lun->lun_lock);
 			page_index = &lun->mode_pages.index[index_to_aps_page];
 			current_sp = (struct copan_aps_subpage *)
 				     (page_index->page_data +
 				     (page_index->page_len * CTL_PAGE_CURRENT));
 
 			current_sp->lock_active = msg_info.aps.lock_flag;
+			mtx_unlock(&lun->lun_lock);
 		        break;
 		}
 		default:
 		        printf("How did I get here?\n");
 		}
-		mtx_unlock(&ctl_softc->ctl_lock);
 	} else if (event == CTL_HA_EVT_MSG_SENT) {
 		if (param != CTL_HA_STATUS_SUCCESS) {
 			printf("Bad status from ctl_ha_msg_send status %d\n",
@@ -1040,20 +1011,10 @@ ctl_init(void)
 	softc->target.wwid[1] = 0x87654321;
 	STAILQ_INIT(&softc->lun_list);
 	STAILQ_INIT(&softc->pending_lun_queue);
-	STAILQ_INIT(&softc->task_queue);
-	STAILQ_INIT(&softc->incoming_queue);
-	STAILQ_INIT(&softc->rtr_queue);
-	STAILQ_INIT(&softc->done_queue);
-	STAILQ_INIT(&softc->isc_queue);
 	STAILQ_INIT(&softc->fe_list);
 	STAILQ_INIT(&softc->be_list);
 	STAILQ_INIT(&softc->io_pools);
 
-	/*
-	 * We don't bother calling these with ctl_lock held here, because,
-	 * in theory, no one else can try to do anything while we're in our
-	 * module init routine.
-	 */
 	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
 			    &internal_pool)!= 0){
 		printf("ctl: can't allocate %d entry internal pool, "
@@ -1083,25 +1044,23 @@ ctl_init(void)
 	softc->emergency_pool = emergency_pool;
 	softc->othersc_pool = other_pool;
 
-	if (worker_threads > MAXCPU || worker_threads == 0) {
-		printf("invalid kern.cam.ctl.worker_threads value; "
-		    "setting to 1");
-		worker_threads = 1;
-	} else if (worker_threads < 0) {
-		if (mp_ncpus > 2) {
-			/*
-			 * Using more than two worker threads actually hurts
-			 * performance due to lock contention.
-			 */
-			worker_threads = 2;
-		} else {
-			worker_threads = 1;
-		}
-	}
+	if (worker_threads <= 0)
+		worker_threads = max(1, mp_ncpus / 4);
+	if (worker_threads > CTL_MAX_THREADS)
+		worker_threads = CTL_MAX_THREADS;
 
 	for (i = 0; i < worker_threads; i++) {
-		error = kproc_kthread_add(ctl_work_thread, softc,
-		    &softc->work_thread, NULL, 0, 0, "ctl", "work%d", i);
+		struct ctl_thread *thr = &softc->threads[i];
+
+		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
+		thr->ctl_softc = softc;
+		STAILQ_INIT(&thr->incoming_queue);
+		STAILQ_INIT(&thr->rtr_queue);
+		STAILQ_INIT(&thr->done_queue);
+		STAILQ_INIT(&thr->isc_queue);
+
+		error = kproc_kthread_add(ctl_work_thread, thr,
+		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
 		if (error != 0) {
 			printf("error creating CTL work thread!\n");
 			ctl_pool_free(internal_pool);
@@ -1110,6 +1069,15 @@ ctl_init(void)
 			return (error);
 		}
 	}
+	error = kproc_kthread_add(ctl_lun_thread, softc,
+	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
+	if (error != 0) {
+		printf("error creating CTL lun thread!\n");
+		ctl_pool_free(internal_pool);
+		ctl_pool_free(emergency_pool);
+		ctl_pool_free(other_pool);
+		return (error);
+	}
 	if (bootverbose)
 		printf("ctl: CAM Target Layer loaded\n");
 
@@ -1193,6 +1161,7 @@ ctl_shutdown(void)
 
 #if 0
 	ctl_shutdown_thread(softc->work_thread);
+	mtx_destroy(&softc->queue_lock);
 #endif
 
 	mtx_destroy(&softc->pool_lock);
@@ -1684,7 +1653,7 @@ bailout:
  * (SER_ONLY mode).
  */
 static int
-ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
+ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
 {
 	struct ctl_softc *ctl_softc;
 	union ctl_ha_msg msg_info;
@@ -1693,12 +1662,8 @@ ctl_serialize_other_sc_cmd(struct ctl_sc
 	uint32_t targ_lun;
 
 	ctl_softc = control_softc;
-	if (have_lock == 0)
-		mtx_lock(&ctl_softc->ctl_lock);
 
-	targ_lun = ctsio->io_hdr.nexus.targ_lun;
-	if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
-		targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
+	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
 	lun = ctl_softc->ctl_luns[targ_lun];
 	if (lun==NULL)
 	{
@@ -1727,12 +1692,11 @@ ctl_serialize_other_sc_cmd(struct ctl_sc
 	        if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
 				sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
 		}
-		if (have_lock == 0)
-			mtx_unlock(&ctl_softc->ctl_lock);
 		return(1);
 
 	}
 
+	mtx_lock(&lun->lun_lock);
     	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
 
 	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
@@ -1747,8 +1711,7 @@ ctl_serialize_other_sc_cmd(struct ctl_sc
 	case CTL_ACTION_SKIP:
 		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
 			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
-			STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
-					   &ctsio->io_hdr, links);
+			ctl_enqueue_rtr((union ctl_io *)ctsio);
 		} else {
 
 			/* send msg back to other side */
@@ -1843,8 +1806,7 @@ ctl_serialize_other_sc_cmd(struct ctl_sc
 		}
 		break;
 	}
-	if (have_lock == 0)
-		mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 	return (retval);
 }
 
@@ -2003,8 +1965,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, 
 
 	retval = 0;
 
-	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
-
+	mtx_lock(&lun->lun_lock);
 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
 	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
 	     ooa_links)) {
@@ -2041,6 +2002,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, 
 		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
 			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
 	}
+	mtx_unlock(&lun->lun_lock);
 
 	return (retval);
 }
@@ -2413,6 +2375,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 		mtx_lock(&softc->ctl_lock);
 		printf("Dumping OOA queues:\n");
 		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			mtx_lock(&lun->lun_lock);
 			for (io = (union ctl_io *)TAILQ_FIRST(
 			     &lun->ooa_queue); io != NULL;
 			     io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
@@ -2434,6 +2397,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 				sbuf_finish(&sb);
 				printf("%s\n", sbuf_data(&sb));
 			}
+			mtx_unlock(&lun->lun_lock);
 		}
 		printf("OOA queues dump done\n");
 		mtx_unlock(&softc->ctl_lock);
@@ -2549,15 +2513,16 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 			ooa_info->status = CTL_OOA_INVALID_LUN;
 			break;
 		}
-
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		ooa_info->num_entries = 0;
 		for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
 		     io != NULL; io = (union ctl_io *)TAILQ_NEXT(
 		     &io->io_hdr, ooa_links)) {
 			ooa_info->num_entries++;
 		}
+		mtx_unlock(&lun->lun_lock);
 
-		mtx_unlock(&softc->ctl_lock);
 		ooa_info->status = CTL_OOA_SUCCESS;
 
 		break;
@@ -2675,6 +2640,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
 		} else {
 			lun = softc->ctl_luns[delay_info->lun_id];
+			mtx_lock(&lun->lun_lock);
 
 			delay_info->status = CTL_DELAY_STATUS_OK;
 
@@ -2707,6 +2673,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 					CTL_DELAY_STATUS_INVALID_LOC;
 				break;
 			}
+			mtx_unlock(&lun->lun_lock);
 		}
 
 		mtx_unlock(&softc->ctl_lock);
@@ -2767,12 +2734,13 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 		 * in the set case, hopefully the user won't do something
 		 * silly.
 		 */
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		if (cmd == CTL_GETSYNC)
 			sync_info->sync_interval = lun->sync_interval;
 		else
 			lun->sync_interval = sync_info->sync_interval;
-
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 
 		sync_info->status = CTL_GS_SYNC_OK;
 
@@ -2833,6 +2801,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 			retval = EINVAL;
 			break;
 		}
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 
 		/*
 		 * We could do some checking here to verify the validity
@@ -2855,7 +2825,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 		err_desc->serial = lun->error_serial;
 		lun->error_serial++;
 
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		break;
 	}
 	case CTL_ERROR_INJECT_DELETE: {
@@ -2875,6 +2845,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 			retval = EINVAL;
 			break;
 		}
+		mtx_lock(&lun->lun_lock);
+		mtx_unlock(&softc->ctl_lock);
 		STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
 			if (desc->serial != delete_desc->serial)
 				continue;
@@ -2884,7 +2856,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 			free(desc, M_CTL);
 			delete_done = 1;
 		}
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		if (delete_done == 0) {
 			printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
 			       "error serial %ju on LUN %u\n", __func__, 
@@ -3037,8 +3009,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 		sbuf_printf(sb, "<ctllunlist>\n");
 
 		mtx_lock(&softc->ctl_lock);
-
 		STAILQ_FOREACH(lun, &softc->lun_list, links) {
+			mtx_lock(&lun->lun_lock);
 			retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
 					     (uintmax_t)lun->lun);
 
@@ -3130,7 +3102,10 @@ ctl_ioctl(struct cdev *dev, u_long cmd, 
 
 			if (retval != 0)
 				break;
+			mtx_unlock(&lun->lun_lock);
 		}
+		if (lun != NULL)
+			mtx_unlock(&lun->lun_lock);
 		mtx_unlock(&softc->ctl_lock);
 
 		if ((retval != 0)
@@ -3612,49 +3587,9 @@ ctl_free_io(union ctl_io *io)
 	 */
 	if (io->io_hdr.pool != NULL) {
 		struct ctl_io_pool *pool;
-#if 0
-		struct ctl_softc *ctl_softc;
-		union ctl_io *tmp_io;
-		unsigned long xflags;
-		int i;
-
-		ctl_softc = control_softc;
-#endif
 
 		pool = (struct ctl_io_pool *)io->io_hdr.pool;
-
 		mtx_lock(&pool->ctl_softc->pool_lock);
-#if 0
-		save_flags(xflags);
-
-		for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
-		     &ctl_softc->task_queue); tmp_io != NULL; i++,
-		     tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
-		     links)) {
-			if (tmp_io == io) {
-				printf("%s: %p is still on the task queue!\n",
-				       __func__, tmp_io);
-				printf("%s: (%d): type %d "
-				       "msg %d cdb %x iptl: "
-				       "%d:%d:%d:%d tag 0x%04x "
-				       "flg %#lx\n",
-					__func__, i,
-					tmp_io->io_hdr.io_type,
-					tmp_io->io_hdr.msg_type,
-					tmp_io->scsiio.cdb[0],
-					tmp_io->io_hdr.nexus.initid.id,
-					tmp_io->io_hdr.nexus.targ_port,
-					tmp_io->io_hdr.nexus.targ_target.id,
-					tmp_io->io_hdr.nexus.targ_lun,
-					(tmp_io->io_hdr.io_type ==
-					CTL_IO_TASK) ?
-					tmp_io->taskio.tag_num :
-					tmp_io->scsiio.tag_num,
-					xflags);
-				panic("I/O still on the task queue!");
-			}
-		}
-#endif
 		io->io_hdr.io_type = 0xff;
 		STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
 		pool->total_freed++;
@@ -4304,6 +4239,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_soft
 	}
 	ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
 
+	mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
 	lun->target = target_id;
 	lun->lun = lun_number;
 	lun->be_lun = be_lun;
@@ -4423,7 +4359,6 @@ ctl_free_lun(struct ctl_lun *lun)
 	struct ctl_frontend *fe;
 #endif
 	struct ctl_lun *nlun;
-	union ctl_io *io, *next_io;
 	int i;
 
 	softc = lun->ctl_softc;
@@ -4436,49 +4371,8 @@ ctl_free_lun(struct ctl_lun *lun)
 
 	softc->ctl_luns[lun->lun] = NULL;
 
-	if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
-		printf("ctl_free_lun: aieee!! freeing a LUN with "
-		       "outstanding I/O!!\n");
-	}
-
-	/*
-	 * If we have anything pending on the RtR queue, remove it.
-	 */
-	for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
-	     io = next_io) {
-		uint32_t targ_lun;
-
-		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
-		targ_lun = io->io_hdr.nexus.targ_lun;
-		if (io->io_hdr.nexus.lun_map_fn != NULL)
-			targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
-		if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
-		 && (targ_lun == lun->lun))
-			STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
-				      ctl_io_hdr, links);
-	}
-
-	/*
-	 * Then remove everything from the blocked queue.
-	 */
-	for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
-	     io = next_io) {
-		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
-		TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
-		io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
-	}
-
-	/*
-	 * Now clear out the OOA queue, and free all the I/O.
-	 * XXX KDM should we notify the FETD here?  We probably need to
-	 * quiesce the LUN before deleting it.
-	 */
-	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
-	     io = next_io) {
-		next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
-		TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
-		ctl_free_io(io);
-	}
+	if (!TAILQ_EMPTY(&lun->ooa_queue))
+		panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
 
 	softc->num_luns--;
 
@@ -4540,6 +4434,7 @@ ctl_free_lun(struct ctl_lun *lun)
 	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
 	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
 
+	mtx_destroy(&lun->lun_lock);
 	if (lun->flags & CTL_LUN_MALLOCED)
 		free(lun, M_CTL);
 
@@ -4568,15 +4463,12 @@ ctl_create_lun(struct ctl_be_lun *be_lun
 int
 ctl_add_lun(struct ctl_be_lun *be_lun)
 {
-	struct ctl_softc *ctl_softc;
-
-	ctl_softc = control_softc;
+	struct ctl_softc *ctl_softc = control_softc;
 
 	mtx_lock(&ctl_softc->ctl_lock);
 	STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
 	mtx_unlock(&ctl_softc->ctl_lock);
-
-	ctl_wakeup_thread();
+	wakeup(&ctl_softc->pending_lun_queue);
 
 	return (0);
 }
@@ -4594,15 +4486,18 @@ ctl_enable_lun(struct ctl_be_lun *be_lun
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
 	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
 		/*
 		 * eh?  Why did we get called if the LUN is already
 		 * enabled?
 		 */
+		mtx_unlock(&lun->lun_lock);
 		mtx_unlock(&ctl_softc->ctl_lock);
 		return (0);
 	}
 	lun->flags &= ~CTL_LUN_DISABLED;
+	mtx_unlock(&lun->lun_lock);
 
 	for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
 		nfe = STAILQ_NEXT(fe, links);
@@ -4647,12 +4542,14 @@ ctl_disable_lun(struct ctl_be_lun *be_lu
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
 	mtx_lock(&ctl_softc->ctl_lock);
-
+	mtx_lock(&lun->lun_lock);
 	if (lun->flags & CTL_LUN_DISABLED) {
+		mtx_unlock(&lun->lun_lock);
 		mtx_unlock(&ctl_softc->ctl_lock);
 		return (0);
 	}
 	lun->flags |= CTL_LUN_DISABLED;
+	mtx_unlock(&lun->lun_lock);
 
 	STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
 		mtx_unlock(&ctl_softc->ctl_lock);
@@ -4689,9 +4586,9 @@ ctl_start_lun(struct ctl_be_lun *be_lun)
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags &= ~CTL_LUN_STOPPED;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4706,9 +4603,9 @@ ctl_stop_lun(struct ctl_be_lun *be_lun)
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags |= CTL_LUN_STOPPED;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4723,9 +4620,9 @@ ctl_lun_offline(struct ctl_be_lun *be_lu
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags |= CTL_LUN_OFFLINE;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4740,9 +4637,9 @@ ctl_lun_online(struct ctl_be_lun *be_lun
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags &= ~CTL_LUN_OFFLINE;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4757,13 +4654,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 
 	/*
 	 * The LUN needs to be disabled before it can be marked invalid.
 	 */
 	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		return (-1);
 	}
 	/*
@@ -4776,9 +4673,13 @@ ctl_invalidate_lun(struct ctl_be_lun *be
 	 * If we have something in the OOA queue, we'll free it when the
 	 * last I/O completes.
 	 */
-	if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
+	if (TAILQ_EMPTY(&lun->ooa_queue)) {
+		mtx_unlock(&lun->lun_lock);
+		mtx_lock(&ctl_softc->ctl_lock);
 		ctl_free_lun(lun);
-	mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&ctl_softc->ctl_lock);
+	} else
+		mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4792,9 +4693,9 @@ ctl_lun_inoperable(struct ctl_be_lun *be
 	ctl_softc = control_softc;
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags |= CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4808,9 +4709,9 @@ ctl_lun_operable(struct ctl_be_lun *be_l
 	ctl_softc = control_softc;
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags &= ~CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -4830,6 +4731,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be
 	mtx_lock(&softc->ctl_lock);
 
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
+	mtx_lock(&lun->lun_lock);
 
 	page_index = NULL;
 	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
@@ -4843,6 +4745,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be
 	}
 
 	if (page_index == NULL) {
+		mtx_unlock(&lun->lun_lock);
 		mtx_unlock(&softc->ctl_lock);
 		printf("%s: APS subpage not found for lun %ju!\n", __func__,
 		       (uintmax_t)lun->lun);
@@ -4853,6 +4756,7 @@ ctl_lun_power_lock(struct ctl_be_lun *be
 	 && (softc->aps_locked_lun != lun->lun)) {
 		printf("%s: attempt to lock LUN %llu when %llu is already "
 		       "locked\n");
+		mtx_unlock(&lun->lun_lock);
 		mtx_unlock(&softc->ctl_lock);
 		return (1);
 	}
@@ -4889,11 +4793,13 @@ ctl_lun_power_lock(struct ctl_be_lun *be
 		if (isc_retval > CTL_HA_STATUS_SUCCESS) {
 			printf("%s: APS (lock=%d) error returned from "
 			       "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
+			mtx_unlock(&lun->lun_lock);
 			mtx_unlock(&softc->ctl_lock);
 			return (1);
 		}
 	}
 
+	mtx_unlock(&lun->lun_lock);
 	mtx_unlock(&softc->ctl_lock);
 
 	return (0);
@@ -4908,14 +4814,14 @@ ctl_lun_capacity_changed(struct ctl_be_l
 
 	softc = control_softc;
 
-	mtx_lock(&softc->ctl_lock);
-
 	lun = (struct ctl_lun *)be_lun->ctl_lun;
 
+	mtx_lock(&lun->lun_lock);
+
 	for (i = 0; i < CTL_MAX_INITIATORS; i++) 
 		lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
 
-	mtx_unlock(&softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 }
 
 /*
@@ -5156,7 +5062,7 @@ ctl_scsi_release(struct ctl_scsiio *ctsi
 	if (length > 0)
 		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 
 	/*
 	 * According to SPC, it is not an error for an intiator to attempt
@@ -5174,6 +5080,8 @@ ctl_scsi_release(struct ctl_scsiio *ctsi
 		}
 	}
 
+	mtx_unlock(&lun->lun_lock);
+
 	ctsio->scsi_status = SCSI_STATUS_OK;
 	ctsio->io_hdr.status = CTL_SUCCESS;
 
@@ -5182,8 +5090,6 @@ ctl_scsi_release(struct ctl_scsiio *ctsi
 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
 	}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
-
 	ctl_done((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
@@ -5291,7 +5197,7 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsi
 	if (length > 0)
 		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
 
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if (lun->flags & CTL_LUN_RESERVED) {
 		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
 		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
@@ -5310,13 +5216,13 @@ ctl_scsi_reserve(struct ctl_scsiio *ctsi
 	ctsio->io_hdr.status = CTL_SUCCESS;
 
 bailout:
+	mtx_unlock(&lun->lun_lock);
+
 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
 		free(ctsio->kern_data_ptr, M_CTL);
 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
 	}
 
-	mtx_unlock(&ctl_softc->ctl_lock);
-
 	ctl_done((union ctl_io *)ctsio);
 	return (CTL_RETVAL_COMPLETE);
 }
@@ -5425,7 +5331,7 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
 	 * Figure out a reasonable way to port this?
 	 */
 #ifdef NEEDTOPORT
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 
 	if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
 	 && (lun->flags & CTL_LUN_OFFLINE)) {
@@ -5433,11 +5339,11 @@ ctl_start_stop(struct ctl_scsiio *ctsio)
 		 * If the LUN is offline, and the on/offline bit isn't set,
 		 * reject the start or stop.  Otherwise, let it through.
 		 */
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_lun_not_ready(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 	} else {
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 #endif /* NEEDTOPORT */
 		/*
 		 * This could be a start or a stop when we're online,
@@ -5598,14 +5504,14 @@ ctl_sync_cache(struct ctl_scsiio *ctsio)
 	 * Check to see whether we're configured to send the SYNCHRONIZE
 	 * CACHE command directly to the back end.
 	 */
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
 	 && (++(lun->sync_count) >= lun->sync_interval)) {
 		lun->sync_count = 0;
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		retval = lun->backend->config_write((union ctl_io *)ctsio);
 	} else {
-		mtx_unlock(&ctl_softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_success(ctsio);
 		ctl_done((union ctl_io *)ctsio);
 	}
@@ -5698,9 +5604,9 @@ ctl_format(struct ctl_scsiio *ctsio)
 	 * get them to issue a command that will basically make them think
 	 * they're blowing away the media.
 	 */
-	mtx_lock(&ctl_softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	lun->flags &= ~CTL_LUN_INOPERABLE;
-	mtx_unlock(&ctl_softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	ctsio->scsi_status = SCSI_STATUS_OK;
 	ctsio->io_hdr.status = CTL_SUCCESS;
@@ -6099,7 +6005,7 @@ ctl_control_page_handler(struct ctl_scsi
 
 	softc = control_softc;
 
-	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	if (((current_cp->rlec & SCP_DSENSE) == 0)
 	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
 		/*
@@ -6195,7 +6101,7 @@ ctl_control_page_handler(struct ctl_scsi
 				CTL_UA_MODE_CHANGE;
 		}
 	}
-	mtx_unlock(&softc->ctl_lock);
+	mtx_unlock(&lun->lun_lock);
 
 	return (0);
 }
@@ -7279,10 +7185,7 @@ ctl_maintenance_in(struct ctl_scsiio *ct
 		return(retval);
 	}
 
-	mtx_lock(&softc->ctl_lock);
 	single = ctl_is_single;
-	mtx_unlock(&softc->ctl_lock);
-
 	if (single)
         	num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
 	else
@@ -7416,7 +7319,7 @@ ctl_persistent_reserve_in(struct ctl_scs
 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
 
 retry:
-	mtx_lock(&softc->ctl_lock);
+	mtx_lock(&lun->lun_lock);
 	switch (cdb->action) {
 	case SPRI_RK: /* read keys */
 		total_len = sizeof(struct scsi_per_res_in_keys) +
@@ -7434,7 +7337,7 @@ retry:
 		break;
 	case SPRI_RS: /* read full status */
 	default:
-		mtx_unlock(&softc->ctl_lock);
+		mtx_unlock(&lun->lun_lock);
 		ctl_set_invalid_field(ctsio,

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
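
A pattern worth noting in the ioctl hunks above (CTL_GET_OOA, CTL_GETSYNC,
the error-injection cases): the per-LUN lock is acquired while the global
lock is still held, and the global lock is then dropped before the
LUN-local work begins.  A condensed sketch of that handoff, with
illustrative variable names:

	union ctl_io *io;
	int num_entries = 0;

	mtx_lock(&softc->ctl_lock);	/* needed for the LUN lookup */
	lun = softc->ctl_luns[lun_id];
	mtx_lock(&lun->lun_lock);	/* pin the LUN first... */
	mtx_unlock(&softc->ctl_lock);	/* ...then let everyone else go */

	/* From here on we contend only with users of this one LUN. */
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
	     io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links))
		num_entries++;
	mtx_unlock(&lun->lun_lock);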


