Date:      Fri, 23 Apr 2021 11:14:39 GMT
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-branches@FreeBSD.org
Subject:   git: cd317a921331 - stable/13 - linuxkpi: copy ldev into local to test and free the same pointer
Message-ID:  <202104231114.13NBEdba012669@gitrepo.freebsd.org>

The branch stable/13 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=cd317a92133140ad78f296af5315cd8ffb08b046

commit cd317a92133140ad78f296af5315cd8ffb08b046
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2021-03-30 08:41:00 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2021-04-23 11:14:06 +0000

    linuxkpi: copy ldev into local to test and free the same pointer
    
    (cherry picked from commit 7b0125cbec1579a0a1bf38f7abe583b5f6fd79e7)
---
 sys/compat/linuxkpi/common/src/linux_compat.c      |    5 +-
 sys/compat/linuxkpi/common/src/linux_compat.c.orig | 2598 ++++++++++++++++++++
 2 files changed, 2601 insertions(+), 2 deletions(-)
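
For readers skimming the diff below: as the subject line says, the change
copies filp->f_cdev into the local ldev and then uses that local for both
the NULL check and linux_cdev_deref(), so the test and the release operate
on the same pointer value.  A minimal stand-alone sketch of that pattern
follows; it is illustrative only -- the struct and function names in it are
invented stand-ins, not the linuxkpi ones.

/*
 * Illustrative sketch of the "snapshot, test, release" pattern.
 * Not the committed linuxkpi code; "struct device" and
 * device_release() stand in for struct linux_cdev and
 * linux_cdev_deref().
 */
#include <stdio.h>
#include <stdlib.h>

struct device {
	int refs;
};

struct file_ctx {
	struct device *dev;	/* shared field, may be NULL */
};

static void
device_release(struct device *dev)
{
	if (--dev->refs == 0)
		free(dev);
}

static void
file_close(struct file_ctx *ctx)
{
	struct device *dev;

	/*
	 * Read the shared field once into a local, so the NULL test
	 * and the release below act on the same pointer value.
	 */
	dev = ctx->dev;
	if (dev != NULL)
		device_release(dev);
	free(ctx);
}

int
main(void)
{
	struct file_ctx *ctx = calloc(1, sizeof(*ctx));

	ctx->dev = calloc(1, sizeof(*ctx->dev));
	ctx->dev->refs = 1;
	file_close(ctx);
	printf("closed\n");
	return (0);
}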

diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index 5f7e2664bee1..a9fa50ded219 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -1513,8 +1513,9 @@ linux_file_close(struct file *file, struct thread *td)
 	if (filp->f_vnode != NULL)
 		vdrop(filp->f_vnode);
 	linux_drop_fop(ldev);
-	if (filp->f_cdev != NULL)
-		linux_cdev_deref(filp->f_cdev);
+	ldev = filp->f_cdev;
+	if (ldev != NULL)
+		linux_cdev_deref(ldev);
 	kfree(filp);
 
 	return (error);
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c.orig b/sys/compat/linuxkpi/common/src/linux_compat.c.orig
new file mode 100644
index 000000000000..42f47b6e510a
--- /dev/null
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c.orig
@@ -0,0 +1,2598 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_stack.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/proc.h>
+#include <sys/sglist.h>
+#include <sys/sleepqueue.h>
+#include <sys/refcount.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/rwlock.h>
+#include <sys/mman.h>
+#include <sys/stack.h>
+#include <sys/time.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+#include <machine/stdarg.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <machine/md_var.h>
+#endif
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/wait_bit.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <asm/smp.h>
+#endif
+
+SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+    "LinuxKPI parameters");
+
+int linuxkpi_debug;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
+    &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
+
+static struct timeval lkpi_net_lastlog;
+static int lkpi_net_curpps;
+static int lkpi_net_maxpps = 99;
+SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN,
+    &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second.");
+
+MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
+
+#include <linux/rbtree.h>
+/* Undo Linux compat changes. */
+#undef RB_ROOT
+#undef file
+#undef cdev
+#define	RB_ROOT(head)	(head)->rbh_root
+
+static void linux_cdev_deref(struct linux_cdev *ldev);
+static struct vm_area_struct *linux_cdev_handle_find(void *handle);
+
+struct kobject linux_class_root;
+struct device linux_root_device;
+struct class linux_class_misc;
+struct list_head pci_drivers;
+struct list_head pci_devices;
+spinlock_t pci_lock;
+
+unsigned long linux_timer_hz_mask;
+
+wait_queue_head_t linux_bit_waitq;
+wait_queue_head_t linux_var_waitq;
+
+int
+panic_cmp(struct rb_node *one, struct rb_node *two)
+{
+	panic("no cmp");
+}
+
+RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
+
+int
+kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
+{
+	va_list tmp_va;
+	int len;
+	char *old;
+	char *name;
+	char dummy;
+
+	old = kobj->name;
+
+	if (old && fmt == NULL)
+		return (0);
+
+	/* compute length of string */
+	va_copy(tmp_va, args);
+	len = vsnprintf(&dummy, 0, fmt, tmp_va);
+	va_end(tmp_va);
+
+	/* account for zero termination */
+	len++;
+
+	/* check for error */
+	if (len < 1)
+		return (-EINVAL);
+
+	/* allocate memory for string */
+	name = kzalloc(len, GFP_KERNEL);
+	if (name == NULL)
+		return (-ENOMEM);
+	vsnprintf(name, len, fmt, args);
+	kobj->name = name;
+
+	/* free old string */
+	kfree(old);
+
+	/* filter new string */
+	for (; *name != '\0'; name++)
+		if (*name == '/')
+			*name = '!';
+	return (0);
+}
+
+int
+kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+
+	return (error);
+}
+
+static int
+kobject_add_complete(struct kobject *kobj, struct kobject *parent)
+{
+	const struct kobj_type *t;
+	int error;
+
+	kobj->parent = parent;
+	error = sysfs_create_dir(kobj);
+	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
+		struct attribute **attr;
+		t = kobj->ktype;
+
+		for (attr = t->default_attrs; *attr != NULL; attr++) {
+			error = sysfs_create_file(kobj, *attr);
+			if (error)
+				break;
+		}
+		if (error)
+			sysfs_remove_dir(kobj);
+	}
+	return (error);
+}
+
+int
+kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+	if (error)
+		return (error);
+
+	return kobject_add_complete(kobj, parent);
+}
+
+void
+linux_kobject_release(struct kref *kref)
+{
+	struct kobject *kobj;
+	char *name;
+
+	kobj = container_of(kref, struct kobject, kref);
+	sysfs_remove_dir(kobj);
+	name = kobj->name;
+	if (kobj->ktype && kobj->ktype->release)
+		kobj->ktype->release(kobj);
+	kfree(name);
+}
+
+static void
+linux_kobject_kfree(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static void
+linux_kobject_kfree_name(struct kobject *kobj)
+{
+	if (kobj) {
+		kfree(kobj->name);
+	}
+}
+
+const struct kobj_type linux_kfree_type = {
+	.release = linux_kobject_kfree
+};
+
+static void
+linux_device_release(struct device *dev)
+{
+	pr_debug("linux_device_release: %s\n", dev_name(dev));
+	kfree(dev);
+}
+
+static ssize_t
+linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct class_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct class_attribute, attr);
+	error = -EIO;
+	if (dattr->show)
+		error = dattr->show(container_of(kobj, struct class, kobj),
+		    dattr, buf);
+	return (error);
+}
+
+static ssize_t
+linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+    size_t count)
+{
+	struct class_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct class_attribute, attr);
+	error = -EIO;
+	if (dattr->store)
+		error = dattr->store(container_of(kobj, struct class, kobj),
+		    dattr, buf, count);
+	return (error);
+}
+
+static void
+linux_class_release(struct kobject *kobj)
+{
+	struct class *class;
+
+	class = container_of(kobj, struct class, kobj);
+	if (class->class_release)
+		class->class_release(class);
+}
+
+static const struct sysfs_ops linux_class_sysfs = {
+	.show  = linux_class_show,
+	.store = linux_class_store,
+};
+
+const struct kobj_type linux_class_ktype = {
+	.release = linux_class_release,
+	.sysfs_ops = &linux_class_sysfs
+};
+
+static void
+linux_dev_release(struct kobject *kobj)
+{
+	struct device *dev;
+
+	dev = container_of(kobj, struct device, kobj);
+	/* This is the precedence defined by linux. */
+	if (dev->release)
+		dev->release(dev);
+	else if (dev->class && dev->class->dev_release)
+		dev->class->dev_release(dev);
+}
+
+static ssize_t
+linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct device_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct device_attribute, attr);
+	error = -EIO;
+	if (dattr->show)
+		error = dattr->show(container_of(kobj, struct device, kobj),
+		    dattr, buf);
+	return (error);
+}
+
+static ssize_t
+linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+    size_t count)
+{
+	struct device_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct device_attribute, attr);
+	error = -EIO;
+	if (dattr->store)
+		error = dattr->store(container_of(kobj, struct device, kobj),
+		    dattr, buf, count);
+	return (error);
+}
+
+static const struct sysfs_ops linux_dev_sysfs = {
+	.show  = linux_dev_show,
+	.store = linux_dev_store,
+};
+
+const struct kobj_type linux_dev_ktype = {
+	.release = linux_dev_release,
+	.sysfs_ops = &linux_dev_sysfs
+};
+
+struct device *
+device_create(struct class *class, struct device *parent, dev_t devt,
+    void *drvdata, const char *fmt, ...)
+{
+	struct device *dev;
+	va_list args;
+
+	dev = kzalloc(sizeof(*dev), M_WAITOK);
+	dev->parent = parent;
+	dev->class = class;
+	dev->devt = devt;
+	dev->driver_data = drvdata;
+	dev->release = linux_device_release;
+	va_start(args, fmt);
+	kobject_set_name_vargs(&dev->kobj, fmt, args);
+	va_end(args);
+	device_register(dev);
+
+	return (dev);
+}
+
+int
+kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
+    struct kobject *parent, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	kobject_init(kobj, ktype);
+	kobj->ktype = ktype;
+	kobj->parent = parent;
+	kobj->name = NULL;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+	if (error)
+		return (error);
+	return kobject_add_complete(kobj, parent);
+}
+
+static void
+linux_kq_lock(void *arg)
+{
+	spinlock_t *s = arg;
+
+	spin_lock(s);
+}
+static void
+linux_kq_unlock(void *arg)
+{
+	spinlock_t *s = arg;
+
+	spin_unlock(s);
+}
+
+static void
+linux_kq_assert_lock(void *arg, int what)
+{
+#ifdef INVARIANTS
+	spinlock_t *s = arg;
+
+	if (what == LA_LOCKED)
+		mtx_assert(&s->m, MA_OWNED);
+	else
+		mtx_assert(&s->m, MA_NOTOWNED);
+#endif
+}
+
+static void
+linux_file_kqfilter_poll(struct linux_file *, int);
+
+struct linux_file *
+linux_file_alloc(void)
+{
+	struct linux_file *filp;
+
+	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+
+	/* set initial refcount */
+	filp->f_count = 1;
+
+	/* setup fields needed by kqueue support */
+	spin_lock_init(&filp->f_kqlock);
+	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
+	    linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock);
+
+	return (filp);
+}
+
+void
+linux_file_free(struct linux_file *filp)
+{
+	if (filp->_file == NULL) {
+		if (filp->f_shmem != NULL)
+			vm_object_deallocate(filp->f_shmem);
+		kfree(filp);
+	} else {
+		/*
+		 * The close method of the character device or file
+		 * will free the linux_file structure:
+		 */
+		_fdrop(filp->_file, curthread);
+	}
+}
+
+static int
+linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
+    vm_page_t *mres)
+{
+	struct vm_area_struct *vmap;
+
+	vmap = linux_cdev_handle_find(vm_obj->handle);
+
+	MPASS(vmap != NULL);
+	MPASS(vmap->vm_private_data == vm_obj->handle);
+
+	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
+		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
+		vm_page_t page;
+
+		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
+			/*
+			 * If the passed in result page is a fake
+			 * page, update it with the new physical
+			 * address.
+			 */
+			page = *mres;
+			vm_page_updatefake(page, paddr, vm_obj->memattr);
+		} else {
+			/*
+			 * Replace the passed in "mres" page with our
+			 * own fake page and free up the all of the
+			 * own fake page and free up all of the
+			 */
+			VM_OBJECT_WUNLOCK(vm_obj);
+			page = vm_page_getfake(paddr, vm_obj->memattr);
+			VM_OBJECT_WLOCK(vm_obj);
+
+			vm_page_replace(page, vm_obj, (*mres)->pindex, *mres);
+			*mres = page;
+		}
+		vm_page_valid(page);
+		return (VM_PAGER_OK);
+	}
+	return (VM_PAGER_FAIL);
+}
+
+static int
+linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
+    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
+{
+	struct vm_area_struct *vmap;
+	int err;
+
+	/* get VM area structure */
+	vmap = linux_cdev_handle_find(vm_obj->handle);
+	MPASS(vmap != NULL);
+	MPASS(vmap->vm_private_data == vm_obj->handle);
+
+	VM_OBJECT_WUNLOCK(vm_obj);
+
+	linux_set_current(curthread);
+
+	down_write(&vmap->vm_mm->mmap_sem);
+	if (unlikely(vmap->vm_ops == NULL)) {
+		err = VM_FAULT_SIGBUS;
+	} else {
+		struct vm_fault vmf;
+
+		/* fill out VM fault structure */
+		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
+		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+		vmf.pgoff = 0;
+		vmf.page = NULL;
+		vmf.vma = vmap;
+
+		vmap->vm_pfn_count = 0;
+		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
+		vmap->vm_obj = vm_obj;
+
+		err = vmap->vm_ops->fault(vmap, &vmf);
+
+		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
+			kern_yield(PRI_USER);
+			err = vmap->vm_ops->fault(vmap, &vmf);
+		}
+	}
+
+	/* translate return code */
+	switch (err) {
+	case VM_FAULT_OOM:
+		err = VM_PAGER_AGAIN;
+		break;
+	case VM_FAULT_SIGBUS:
+		err = VM_PAGER_BAD;
+		break;
+	case VM_FAULT_NOPAGE:
+		/*
+		 * By contract the fault handler will return having
+		 * busied all the pages itself. If pidx is already
+		 * found in the object, it will simply xbusy the first
+		 * page and return with vm_pfn_count set to 1.
+		 */
+		*first = vmap->vm_pfn_first;
+		*last = *first + vmap->vm_pfn_count - 1;
+		err = VM_PAGER_OK;
+		break;
+	default:
+		err = VM_PAGER_ERROR;
+		break;
+	}
+	up_write(&vmap->vm_mm->mmap_sem);
+	VM_OBJECT_WLOCK(vm_obj);
+	return (err);
+}
+
+static struct rwlock linux_vma_lock;
+static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
+    TAILQ_HEAD_INITIALIZER(linux_vma_head);
+
+static void
+linux_cdev_handle_free(struct vm_area_struct *vmap)
+{
+	/* Drop reference on vm_file */
+	if (vmap->vm_file != NULL)
+		fput(vmap->vm_file);
+
+	/* Drop reference on mm_struct */
+	mmput(vmap->vm_mm);
+
+	kfree(vmap);
+}
+
+static void
+linux_cdev_handle_remove(struct vm_area_struct *vmap)
+{
+	rw_wlock(&linux_vma_lock);
+	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
+	rw_wunlock(&linux_vma_lock);
+}
+
+static struct vm_area_struct *
+linux_cdev_handle_find(void *handle)
+{
+	struct vm_area_struct *vmap;
+
+	rw_rlock(&linux_vma_lock);
+	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
+		if (vmap->vm_private_data == handle)
+			break;
+	}
+	rw_runlock(&linux_vma_lock);
+	return (vmap);
+}
+
+static int
+linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+		      vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+
+	MPASS(linux_cdev_handle_find(handle) != NULL);
+	*color = 0;
+	return (0);
+}
+
+static void
+linux_cdev_pager_dtor(void *handle)
+{
+	const struct vm_operations_struct *vm_ops;
+	struct vm_area_struct *vmap;
+
+	vmap = linux_cdev_handle_find(handle);
+	MPASS(vmap != NULL);
+
+	/*
+	 * Remove handle before calling close operation to prevent
+	 * other threads from reusing the handle pointer.
+	 */
+	linux_cdev_handle_remove(vmap);
+
+	down_write(&vmap->vm_mm->mmap_sem);
+	vm_ops = vmap->vm_ops;
+	if (likely(vm_ops != NULL))
+		vm_ops->close(vmap);
+	up_write(&vmap->vm_mm->mmap_sem);
+
+	linux_cdev_handle_free(vmap);
+}
+
+static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
+  {
+	/* OBJT_MGTDEVICE */
+	.cdev_pg_populate	= linux_cdev_pager_populate,
+	.cdev_pg_ctor	= linux_cdev_pager_ctor,
+	.cdev_pg_dtor	= linux_cdev_pager_dtor
+  },
+  {
+	/* OBJT_DEVICE */
+	.cdev_pg_fault	= linux_cdev_pager_fault,
+	.cdev_pg_ctor	= linux_cdev_pager_ctor,
+	.cdev_pg_dtor	= linux_cdev_pager_dtor
+  },
+};
+
+int
+zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+    unsigned long size)
+{
+	vm_object_t obj;
+	vm_page_t m;
+
+	obj = vma->vm_obj;
+	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
+		return (-ENOTSUP);
+	VM_OBJECT_RLOCK(obj);
+	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
+	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
+	    m = TAILQ_NEXT(m, listq))
+		pmap_remove_all(m);
+	VM_OBJECT_RUNLOCK(obj);
+	return (0);
+}
+
+static struct file_operations dummy_ldev_ops = {
+	/* XXXKIB */
+};
+
+static struct linux_cdev dummy_ldev = {
+	.ops = &dummy_ldev_ops,
+};
+
+#define	LDEV_SI_DTR	0x0001
+#define	LDEV_SI_REF	0x0002
+
+static void
+linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
+    struct linux_cdev **dev)
+{
+	struct linux_cdev *ldev;
+	u_int siref;
+
+	ldev = filp->f_cdev;
+	*fop = filp->f_op;
+	if (ldev != NULL) {
+		for (siref = ldev->siref;;) {
+			if ((siref & LDEV_SI_DTR) != 0) {
+				ldev = &dummy_ldev;
+				siref = ldev->siref;
+				*fop = ldev->ops;
+				MPASS((ldev->siref & LDEV_SI_DTR) == 0);
+			} else if (atomic_fcmpset_int(&ldev->siref, &siref,
+			    siref + LDEV_SI_REF)) {
+				break;
+			}
+		}
+	}
+	*dev = ldev;
+}
+
+static void
+linux_drop_fop(struct linux_cdev *ldev)
+{
+
+	if (ldev == NULL)
+		return;
+	MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
+	atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
+}
+
+#define	OPW(fp,td,code) ({			\
+	struct file *__fpop;			\
+	__typeof(code) __retval;		\
+						\
+	__fpop = (td)->td_fpop;			\
+	(td)->td_fpop = (fp);			\
+	__retval = (code);			\
+	(td)->td_fpop = __fpop;			\
+	__retval;				\
+})
+
+static int
+linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
+    struct file *file)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	const struct file_operations *fop;
+	int error;
+
+	ldev = dev->si_drv1;
+
+	filp = linux_file_alloc();
+	filp->f_dentry = &filp->f_dentry_store;
+	filp->f_op = ldev->ops;
+	filp->f_mode = file->f_flag;
+	filp->f_flags = file->f_flag;
+	filp->f_vnode = file->f_vnode;
+	filp->_file = file;
+	refcount_acquire(&ldev->refs);
+	filp->f_cdev = ldev;
+
+	linux_set_current(td);
+	linux_get_fop(filp, &fop, &ldev);
+
+	if (fop->open != NULL) {
+		error = -fop->open(file->f_vnode, filp);
+		if (error != 0) {
+			linux_drop_fop(ldev);
+			linux_cdev_deref(filp->f_cdev);
+			kfree(filp);
+			return (error);
+		}
+	}
+
+	/* hold on to the vnode - used for fstat() */
+	vhold(filp->f_vnode);
+
+	/* release the file from devfs */
+	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
+	linux_drop_fop(ldev);
+	return (ENXIO);
+}
+
+#define	LINUX_IOCTL_MIN_PTR 0x10000UL
+#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
+
+static inline int
+linux_remap_address(void **uaddr, size_t len)
+{
+	uintptr_t uaddr_val = (uintptr_t)(*uaddr);
+
+	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
+	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
+		struct task_struct *pts = current;
+		if (pts == NULL) {
+			*uaddr = NULL;
+			return (1);
+		}
+
+		/* compute data offset */
+		uaddr_val -= LINUX_IOCTL_MIN_PTR;
+
+		/* check that length is within bounds */
+		if ((len > IOCPARM_MAX) ||
+		    (uaddr_val + len) > pts->bsd_ioctl_len) {
+			*uaddr = NULL;
+			return (1);
+		}
+
+		/* re-add kernel buffer address */
+		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;
+
+		/* update address location */
+		*uaddr = (void *)uaddr_val;
+		return (1);
+	}
+	return (0);
+}
+
+int
+linux_copyin(const void *uaddr, void *kaddr, size_t len)
+{
+	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
+		if (uaddr == NULL)
+			return (-EFAULT);
+		memcpy(kaddr, uaddr, len);
+		return (0);
+	}
+	return (-copyin(uaddr, kaddr, len));
+}
+
+int
+linux_copyout(const void *kaddr, void *uaddr, size_t len)
+{
+	if (linux_remap_address(&uaddr, len)) {
+		if (uaddr == NULL)
+			return (-EFAULT);
+		memcpy(uaddr, kaddr, len);
+		return (0);
+	}
+	return (-copyout(kaddr, uaddr, len));
+}
+
+size_t
+linux_clear_user(void *_uaddr, size_t _len)
+{
+	uint8_t *uaddr = _uaddr;
+	size_t len = _len;
+
+	/* make sure uaddr is aligned before going into the fast loop */
+	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
+		if (subyte(uaddr, 0))
+			return (_len);
+		uaddr++;
+		len--;
+	}
+
+	/* zero 8 bytes at a time */
+	while (len > 7) {
+#ifdef __LP64__
+		if (suword64(uaddr, 0))
+			return (_len);
+#else
+		if (suword32(uaddr, 0))
+			return (_len);
+		if (suword32(uaddr + 4, 0))
+			return (_len);
+#endif
+		uaddr += 8;
+		len -= 8;
+	}
+
+	/* zero fill end, if any */
+	while (len > 0) {
+		if (subyte(uaddr, 0))
+			return (_len);
+		uaddr++;
+		len--;
+	}
+	return (0);
+}
+
+int
+linux_access_ok(const void *uaddr, size_t len)
+{
+	uintptr_t saddr;
+	uintptr_t eaddr;
+
+	/* get start and end address */
+	saddr = (uintptr_t)uaddr;
+	eaddr = (uintptr_t)uaddr + len;
+
+	/* verify addresses are valid for userspace */
+	return ((saddr == eaddr) ||
+	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
+}
+
+/*
+ * This function should return either EINTR or ERESTART depending on
+ * the signal type sent to this thread:
+ */
+static int
+linux_get_error(struct task_struct *task, int error)
+{
+	/* check for signal type interrupt code */
+	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
+		error = -linux_schedule_get_interrupt_value(task);
+		if (error == 0)
+			error = EINTR;
+	}
+	return (error);
+}
+
+static int
+linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
+    const struct file_operations *fop, u_long cmd, caddr_t data,
+    struct thread *td)
+{
+	struct task_struct *task = current;
+	unsigned size;
+	int error;
+
+	size = IOCPARM_LEN(cmd);
+	/* refer to logic in sys_ioctl() */
+	if (size > 0) {
+		/*
+		 * Setup hint for linux_copyin() and linux_copyout().
+		 *
+		 * Background: Linux code expects a user-space address
+		 * while FreeBSD supplies a kernel-space address.
+		 */
+		task->bsd_ioctl_data = data;
*** 1651 LINES SKIPPED ***


