Date:      Wed, 28 Aug 2013 20:59:23 +0000 (UTC)
From:      Navdeep Parhar <np@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r255006 - in head/sys/dev/cxgbe: . tom
Message-ID:  <201308282059.r7SKxNIN084368@svn.freebsd.org>

Author: np
Date: Wed Aug 28 20:59:22 2013
New Revision: 255006
URL: http://svnweb.freebsd.org/changeset/base/255006

Log:
  Change t4_list_lock and t4_uld_list_lock from mutexes to sx(9) locks.
  
  - tom_uninit had to be reworked not to hold the adapter lock (a mutex)
    around t4_deactivate_uld, which acquires the uld_list_lock.
  - the ifc_match routine of the interface cloner that creates the tracer
    ifnet had to be reworked because the kernel calls ifc_match with the
    global if_cloners_mtx held.
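
  For reference, a minimal sketch of the sx(9) reader/writer pattern that
  this conversion adopts (illustrative only, not part of the diff below;
  the example_* names are hypothetical stand-ins for the driver's lists):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/queue.h>

struct example_softc {
        SLIST_ENTRY(example_softc) link;
};

static struct sx example_lock;
static SLIST_HEAD(, example_softc) example_list;

static void
example_modload(void)
{

        /* sx_init() takes only a witness name; there is no MTX_DEF-style flag. */
        sx_init(&example_lock, "example list");
        SLIST_INIT(&example_list);
}

static void
example_iterate(void (*func)(struct example_softc *, void *), void *arg)
{
        struct example_softc *sc;

        /*
         * Readers take the lock shared.  Unlike a mutex, an sx lock may
         * be held across a sleep.
         */
        sx_slock(&example_lock);
        SLIST_FOREACH(sc, &example_list, link)
                func(sc, arg);
        sx_sunlock(&example_lock);
}

static void
example_insert(struct example_softc *sc)
{

        /* List modifications take the lock exclusive. */
        sx_xlock(&example_lock);
        SLIST_INSERT_HEAD(&example_list, sc, link);
        sx_xunlock(&example_lock);
}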

Modified:
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_tracer.c
  head/sys/dev/cxgbe/tom/t4_tom.c

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Wed Aug 28 20:45:45 2013	(r255005)
+++ head/sys/dev/cxgbe/t4_main.c	Wed Aug 28 20:59:22 2013	(r255006)
@@ -160,10 +160,10 @@ MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio
  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
  * then ADAPTER_LOCK, then t4_uld_list_lock.
  */
-static struct mtx t4_list_lock;
+static struct sx t4_list_lock;
 static SLIST_HEAD(, adapter) t4_list;
 #ifdef TCP_OFFLOAD
-static struct mtx t4_uld_list_lock;
+static struct sx t4_uld_list_lock;
 static SLIST_HEAD(, uld_info) t4_uld_list;
 #endif
 
@@ -568,9 +568,9 @@ t4_attach(device_t dev)
 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
 	    device_get_nameunit(dev));
 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
-	mtx_lock(&t4_list_lock);
+	sx_xlock(&t4_list_lock);
 	SLIST_INSERT_HEAD(&t4_list, sc, link);
-	mtx_unlock(&t4_list_lock);
+	sx_xunlock(&t4_list_lock);
 
 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
 	TAILQ_INIT(&sc->sfl);
@@ -917,9 +917,9 @@ t4_detach(device_t dev)
 	free(sc->tids.ftid_tab, M_CXGBE);
 	t4_destroy_dma_tag(sc);
 	if (mtx_initialized(&sc->sc_lock)) {
-		mtx_lock(&t4_list_lock);
+		sx_xlock(&t4_list_lock);
 		SLIST_REMOVE(&t4_list, sc, adapter, link);
-		mtx_unlock(&t4_list_lock);
+		sx_xunlock(&t4_list_lock);
 		mtx_destroy(&sc->sc_lock);
 	}
 
@@ -7341,7 +7341,7 @@ t4_iterate(void (*func)(struct adapter *
 {
 	struct adapter *sc;
 
-	mtx_lock(&t4_list_lock);
+	sx_slock(&t4_list_lock);
 	SLIST_FOREACH(sc, &t4_list, link) {
 		/*
 		 * func should not make any assumptions about what state sc is
@@ -7349,7 +7349,7 @@ t4_iterate(void (*func)(struct adapter *
 		 */
 		func(sc, arg);
 	}
-	mtx_unlock(&t4_list_lock);
+	sx_sunlock(&t4_list_lock);
 }
 
 static int
@@ -7577,7 +7577,7 @@ t4_register_uld(struct uld_info *ui)
 	int rc = 0;
 	struct uld_info *u;
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_xlock(&t4_uld_list_lock);
 	SLIST_FOREACH(u, &t4_uld_list, link) {
 	    if (u->uld_id == ui->uld_id) {
 		    rc = EEXIST;
@@ -7588,7 +7588,7 @@ t4_register_uld(struct uld_info *ui)
 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
 	ui->refcount = 0;
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_xunlock(&t4_uld_list_lock);
 	return (rc);
 }
 
@@ -7598,7 +7598,7 @@ t4_unregister_uld(struct uld_info *ui)
 	int rc = EINVAL;
 	struct uld_info *u;
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_xlock(&t4_uld_list_lock);
 
 	SLIST_FOREACH(u, &t4_uld_list, link) {
 	    if (u == ui) {
@@ -7613,7 +7613,7 @@ t4_unregister_uld(struct uld_info *ui)
 	    }
 	}
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_xunlock(&t4_uld_list_lock);
 	return (rc);
 }
 
@@ -7625,7 +7625,7 @@ t4_activate_uld(struct adapter *sc, int 
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_slock(&t4_uld_list_lock);
 
 	SLIST_FOREACH(ui, &t4_uld_list, link) {
 		if (ui->uld_id == id) {
@@ -7636,7 +7636,7 @@ t4_activate_uld(struct adapter *sc, int 
 		}
 	}
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_sunlock(&t4_uld_list_lock);
 
 	return (rc);
 }
@@ -7649,7 +7649,7 @@ t4_deactivate_uld(struct adapter *sc, in
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_slock(&t4_uld_list_lock);
 
 	SLIST_FOREACH(ui, &t4_uld_list, link) {
 		if (ui->uld_id == id) {
@@ -7660,7 +7660,7 @@ t4_deactivate_uld(struct adapter *sc, in
 		}
 	}
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_sunlock(&t4_uld_list_lock);
 
 	return (rc);
 }
@@ -7741,10 +7741,10 @@ mod_event(module_t mod, int cmd, void *a
 		if (atomic_fetchadd_int(&loaded, 1))
 			break;
 		t4_sge_modload();
-		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
+		sx_init(&t4_list_lock, "T4/T5 adapters");
 		SLIST_INIT(&t4_list);
 #ifdef TCP_OFFLOAD
-		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
+		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
 		SLIST_INIT(&t4_uld_list);
 #endif
 		t4_tracer_modload();
@@ -7756,23 +7756,23 @@ mod_event(module_t mod, int cmd, void *a
 			break;
 		t4_tracer_modunload();
 #ifdef TCP_OFFLOAD
-		mtx_lock(&t4_uld_list_lock);
+		sx_slock(&t4_uld_list_lock);
 		if (!SLIST_EMPTY(&t4_uld_list)) {
 			rc = EBUSY;
-			mtx_unlock(&t4_uld_list_lock);
+			sx_sunlock(&t4_uld_list_lock);
 			break;
 		}
-		mtx_unlock(&t4_uld_list_lock);
-		mtx_destroy(&t4_uld_list_lock);
+		sx_sunlock(&t4_uld_list_lock);
+		sx_destroy(&t4_uld_list_lock);
 #endif
-		mtx_lock(&t4_list_lock);
+		sx_slock(&t4_list_lock);
 		if (!SLIST_EMPTY(&t4_list)) {
 			rc = EBUSY;
-			mtx_unlock(&t4_list_lock);
+			sx_sunlock(&t4_list_lock);
 			break;
 		}
-		mtx_unlock(&t4_list_lock);
-		mtx_destroy(&t4_list_lock);
+		sx_sunlock(&t4_list_lock);
+		sx_destroy(&t4_list_lock);
 		break;
 	}
 

Modified: head/sys/dev/cxgbe/t4_tracer.c
==============================================================================
--- head/sys/dev/cxgbe/t4_tracer.c	Wed Aug 28 20:45:45 2013	(r255005)
+++ head/sys/dev/cxgbe/t4_tracer.c	Wed Aug 28 20:59:22 2013	(r255006)
@@ -121,14 +121,13 @@ match_name(struct adapter *sc, void *arg
 static int
 t4_cloner_match(struct if_clone *ifc, const char *name)
 {
-	struct match_rr mrr;
 
-	mrr.name = name;
-	mrr.lock = 0;
-	mrr.sc = NULL;
-	t4_iterate(match_name, &mrr);
-
-	return (mrr.sc != NULL);
+	if (strncmp(name, "t4nex", 5) != 0 &&
+	    strncmp(name, "t5nex", 5) != 0)
+		return (0);
+	if (name[5] < '0' || name[5] > '9')
+		return (0);
+	return (1);
 }
 
 static int
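
The rewritten t4_cloner_match() above no longer calls t4_iterate(), which
(with this change) would acquire the t4_list_lock sx while the kernel holds
the global if_cloners_mtx; it simply checks that the name looks like
t4nex<unit> or t5nex<unit>.  Purely as an illustration (not part of the
commit), the same check can be exercised as a standalone userland test;
tracer_name_matches() is a hypothetical stand-in for the body of
t4_cloner_match():

#include <assert.h>
#include <string.h>

static int
tracer_name_matches(const char *name)
{

        if (strncmp(name, "t4nex", 5) != 0 && strncmp(name, "t5nex", 5) != 0)
                return (0);
        if (name[5] < '0' || name[5] > '9')
                return (0);
        return (1);
}

int
main(void)
{

        assert(tracer_name_matches("t4nex0"));
        assert(tracer_name_matches("t5nex12"));
        assert(!tracer_name_matches("t4nex"));          /* no unit number */
        assert(!tracer_name_matches("cxgbe0"));         /* wrong prefix */

        return (0);
}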

Modified: head/sys/dev/cxgbe/tom/t4_tom.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_tom.c	Wed Aug 28 20:45:45 2013	(r255005)
+++ head/sys/dev/cxgbe/tom/t4_tom.c	Wed Aug 28 20:59:22 2013	(r255006)
@@ -1064,14 +1064,14 @@ t4_tom_mod_load(void)
 static void
 tom_uninit(struct adapter *sc, void *arg __unused)
 {
-	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomun"))
+	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
 		return;
 
 	/* Try to free resources (works only if no port has IFCAP_TOE) */
 	if (sc->flags & TOM_INIT_DONE)
 		t4_deactivate_uld(sc, ULD_TOM);
 
-	end_synchronized_op(sc, LOCK_HELD);
+	end_synchronized_op(sc, 0);
 }
 
 static int
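
The flag change above follows from the lock conversion: t4_deactivate_uld()
now acquires t4_uld_list_lock as an sx and may therefore sleep, so
tom_uninit() can no longer sit inside the synchronized operation with the
adapter mutex held (HOLD_LOCK/LOCK_HELD).  A minimal sketch of the general
rule, illustrative only and using hypothetical names: an sx lock, unlike a
mutex, may be held across a sleep.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx demo_lock;

static void
demo(void)
{

        sx_init(&demo_lock, "demo");
        sx_xlock(&demo_lock);
        /*
         * Sleeping with an sx held is legal; doing the same with a
         * non-sleepable mutex held would be flagged by WITNESS.
         */
        pause("demo", hz);
        sx_xunlock(&demo_lock);
        sx_destroy(&demo_lock);
}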


