Date:      Sat, 23 Mar 2019 22:44:11 +0000 (UTC)
From:      Oleksandr Tymoshenko <gonzo@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-12@freebsd.org
Subject:   svn commit: r345459 - stable/12/sys/compat/ndis
Message-ID:  <201903232244.x2NMiBU3077832@repo.freebsd.org>

Author: gonzo
Date: Sat Mar 23 22:44:11 2019
New Revision: 345459
URL: https://svnweb.freebsd.org/changeset/base/345459

Log:
  MFC r343298:
  
  [ndis] Fix unregistered use of FPU by NDIS in kernel on amd64
  
  amd64 miniport drivers are allowed to use the FPU, which triggers an
  "Unregistered use of FPU in kernel" panic.
  
  Wrap all variants of MSCALL with fpu_kern_enter/fpu_kern_leave.  To reduce
  the number of allocations/deallocations done via
  fpu_kern_alloc_ctx/fpu_kern_free_ctx, maintain a cache of fpu_kern_ctx
  elements.
  
  Based on a patch by Paul B Mahol
  
  PR:		165622
  Submitted by:	Vlad Movchan <vladislav.movchan@gmail.com>
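
For reference, the fpu_kern(9) bracket that the diff below wraps around every
MSCALL variant follows the pattern sketched here.  This is a minimal,
uncached illustration only: the helper name call1_with_fpu is hypothetical,
the committed code keeps free/busy lists of contexts instead of allocating
one per call, and x86_64_call1 is the existing trampoline declared in
pe_var.h.

    /* Minimal sketch of the kernel FPU bracket used in the diff below. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/errno.h>
    #include <sys/proc.h>
    #include <machine/fpu.h>

    extern uint64_t x86_64_call1(void *, uint64_t);  /* from pe_var.h */

    static uint64_t
    call1_with_fpu(void *fn, uint64_t a)
    {
            struct fpu_kern_ctx *ctx;
            uint64_t ret;

            /* Allocate a save area for the current FPU/SSE state. */
            if ((ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT)) == NULL)
                    return (ENOMEM);
            /* Register this thread as a kernel FPU user. */
            fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
            ret = x86_64_call1(fn, a);      /* callee may use the FPU */
            fpu_kern_leave(curthread, ctx);
            fpu_kern_free_ctx(ctx);
            return (ret);
    }

Allocating and freeing a context on every call is exactly the overhead the
cache in kern_windrv.c avoids: request_fpu_cc_ent()/release_fpu_cc_ent()
reuse contexts from a free list instead.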

Modified:
  stable/12/sys/compat/ndis/kern_windrv.c
  stable/12/sys/compat/ndis/pe_var.h
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/compat/ndis/kern_windrv.c
==============================================================================
--- stable/12/sys/compat/ndis/kern_windrv.c	Sat Mar 23 21:36:59 2019	(r345458)
+++ stable/12/sys/compat/ndis/kern_windrv.c	Sat Mar 23 22:44:11 2019	(r345459)
@@ -58,6 +58,10 @@ __FBSDID("$FreeBSD$");
 #include <machine/segments.h>
 #endif
 
+#ifdef __amd64__
+#include <machine/fpu.h>
+#endif
+
 #include <dev/usb/usb.h>
 
 #include <compat/ndis/pe_var.h>
@@ -68,6 +72,19 @@ __FBSDID("$FreeBSD$");
 #include <compat/ndis/hal_var.h>
 #include <compat/ndis/usbd_var.h>
 
+#ifdef __amd64__
+struct fpu_cc_ent {
+	struct fpu_kern_ctx	*ctx;
+	LIST_ENTRY(fpu_cc_ent)	entries;
+};
+static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head =
+    LIST_HEAD_INITIALIZER(fpu_free_head);
+static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head =
+    LIST_HEAD_INITIALIZER(fpu_busy_head);
+static struct mtx fpu_free_mtx;
+static struct mtx fpu_busy_mtx;
+#endif
+
 static struct mtx drvdb_mtx;
 static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head;
 
@@ -98,6 +115,13 @@ windrv_libinit(void)
 	mtx_init(&drvdb_mtx, "Windows driver DB lock",
 	    "Windows internal lock", MTX_DEF);
 
+#ifdef __amd64__
+	LIST_INIT(&fpu_free_head);
+	LIST_INIT(&fpu_busy_head);
+	mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF);
+	mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF);
+#endif
+
 	/*
 	 * PCI and pccard devices don't need to use IRPs to
 	 * interact with their bus drivers (usually), so our
@@ -132,6 +156,9 @@ int
 windrv_libfini(void)
 {
 	struct drvdb_ent	*d;
+#ifdef __amd64__
+	struct fpu_cc_ent	*ent;
+#endif
 
 	mtx_lock(&drvdb_mtx); 
 	while(STAILQ_FIRST(&drvdb_head) != NULL) {
@@ -150,6 +177,18 @@ windrv_libfini(void)
 	smp_rendezvous(NULL, x86_oldldt, NULL, NULL);
 	ExFreePool(my_tids);
 #endif
+#ifdef __amd64__
+	while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
+		LIST_REMOVE(ent, entries);
+		fpu_kern_free_ctx(ent->ctx);
+		free(ent, M_DEVBUF);
+	}
+	mtx_destroy(&fpu_free_mtx);
+
+	ent = LIST_FIRST(&fpu_busy_head);
+	KASSERT(ent == NULL, ("busy fpu context list is not empty"));
+	mtx_destroy(&fpu_busy_mtx);
+#endif
 	return (0);
 }
 
@@ -614,6 +653,148 @@ windrv_wrap(func, wrap, argcnt, ftype)
 	*wrap = p;
 
 	return (0);
+}
+
+static struct fpu_cc_ent *
+request_fpu_cc_ent(void)
+{
+	struct fpu_cc_ent *ent;
+
+	mtx_lock(&fpu_free_mtx);
+	if ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
+		LIST_REMOVE(ent, entries);
+		mtx_unlock(&fpu_free_mtx);
+		mtx_lock(&fpu_busy_mtx);
+		LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
+		mtx_unlock(&fpu_busy_mtx);
+		return (ent);
+	}
+	mtx_unlock(&fpu_free_mtx);
+
+	if ((ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF, M_NOWAIT |
+	    M_ZERO)) != NULL) {
+		ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
+		    FPU_KERN_NOWAIT);
+		if (ent->ctx != NULL) {
+			mtx_lock(&fpu_busy_mtx);
+			LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
+			mtx_unlock(&fpu_busy_mtx);
+		} else {
+			free(ent, M_DEVBUF);
+			ent = NULL;
+		}
+	}
+
+	return (ent);
+}
+
+static void
+release_fpu_cc_ent(struct fpu_cc_ent *ent)
+{
+	mtx_lock(&fpu_busy_mtx);
+	LIST_REMOVE(ent, entries);
+	mtx_unlock(&fpu_busy_mtx);
+	mtx_lock(&fpu_free_mtx);
+	LIST_INSERT_HEAD(&fpu_free_head, ent, entries);
+	mtx_unlock(&fpu_free_mtx);
+}
+
+uint64_t
+_x86_64_call1(void *fn, uint64_t a)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call1(fn, a);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call2(void *fn, uint64_t a, uint64_t b)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call2(fn, a, b);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call3(fn, a, b, c);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call4(fn, a, b, c, d);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
+    uint64_t e)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call5(fn, a, b, c, d, e);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
+}
+
+uint64_t
+_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
+    uint64_t e, uint64_t f)
+{
+	struct fpu_cc_ent *ent;
+	uint64_t ret;
+
+	if ((ent = request_fpu_cc_ent()) == NULL)
+		return (ENOMEM);
+	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
+	ret = x86_64_call6(fn, a, b, c, d, e, f);
+	fpu_kern_leave(curthread, ent->ctx);
+	release_fpu_cc_ent(ent);
+
+	return (ret);
 }
 #endif /* __amd64__ */
 

Modified: stable/12/sys/compat/ndis/pe_var.h
==============================================================================
--- stable/12/sys/compat/ndis/pe_var.h	Sat Mar 23 21:36:59 2019	(r345458)
+++ stable/12/sys/compat/ndis/pe_var.h	Sat Mar 23 22:44:11 2019	(r345459)
@@ -460,22 +460,30 @@ extern uint64_t x86_64_call5(void *, uint64_t, uint64_
 extern uint64_t x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t,
 	uint64_t, uint64_t);
 
+uint64_t _x86_64_call1(void *, uint64_t);
+uint64_t _x86_64_call2(void *, uint64_t, uint64_t);
+uint64_t _x86_64_call3(void *, uint64_t, uint64_t, uint64_t);
+uint64_t _x86_64_call4(void *, uint64_t, uint64_t, uint64_t, uint64_t);
+uint64_t _x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t,
+    uint64_t);
+uint64_t _x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t,
+    uint64_t, uint64_t);
 
 #define	MSCALL1(fn, a)						\
-	x86_64_call1((fn), (uint64_t)(a))
+	_x86_64_call1((fn), (uint64_t)(a))
 #define	MSCALL2(fn, a, b)					\
-	x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b))
+	_x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b))
 #define	MSCALL3(fn, a, b, c)					\
-	x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b),		\
+	_x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b),		\
 	(uint64_t)(c))
 #define	MSCALL4(fn, a, b, c, d)					\
-	x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b),		\
+	_x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b),		\
 	(uint64_t)(c), (uint64_t)(d))
 #define	MSCALL5(fn, a, b, c, d, e)				\
-	x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b),		\
+	_x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b),		\
 	(uint64_t)(c), (uint64_t)(d), (uint64_t)(e))
 #define	MSCALL6(fn, a, b, c, d, e, f)				\
-	x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b),		\
+	_x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b),		\
 	(uint64_t)(c), (uint64_t)(d), (uint64_t)(e), (uint64_t)(f))
 
 #endif /* __amd64__ */


