Date:      Tue, 05 Mar 2013 16:45:02 +0100
From:      "Ronald Klop" <ronald-freebsd8@klop.yi.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org, "Konstantin Belousov" <kib@freebsd.org>
Subject:   Re: svn commit: r247835 - in head/sys: dev/drm2 dev/drm2/ttm modules/drm2/drm2
Message-ID:  <op.wthcpc0j8527sy@ronaldradial.versatec.local>
In-Reply-To: <201303050949.r259nYRn020421@svn.freebsd.org>
References:  <201303050949.r259nYRn020421@svn.freebsd.org>

I know this is not done yet, but I appreciate the work very much.

Regards,
Ronald.

On Tue, 05 Mar 2013 10:49:34 +0100, Konstantin Belousov <kib@freebsd.org> wrote:

> Author: kib
> Date: Tue Mar  5 09:49:34 2013
> New Revision: 247835
> URL: http://svnweb.freebsd.org/changeset/base/247835
>
> Log:
>   Import the preliminary port of the TTM.
>   The early commit is done to facilitate the off-tree work on the
>   porting of the Radeon driver.
>   Sponsored by:	The FreeBSD Foundation
>   Debugged and tested by:	dumbbell
>   MFC after:	1 month
>
> Added:
>   head/sys/dev/drm2/ttm/
>   head/sys/dev/drm2/ttm/ttm_agp_backend.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo_api.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo_driver.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo_manager.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo_util.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_bo_vm.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_execbuf_util.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_execbuf_util.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_lock.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_lock.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_memory.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_memory.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_module.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_object.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_object.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_page_alloc.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_page_alloc.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_page_alloc_dma.c   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_placement.h   (contents, props changed)
>   head/sys/dev/drm2/ttm/ttm_tt.c   (contents, props changed)
> Modified:
>   head/sys/dev/drm2/drmP.h
>   head/sys/dev/drm2/drm_drv.c
>   head/sys/dev/drm2/drm_gem.c
>   head/sys/modules/drm2/drm2/Makefile
>
> Modified: head/sys/dev/drm2/drmP.h
> ==============================================================================
> --- head/sys/dev/drm2/drmP.h	Tue Mar  5 09:27:21 2013	(r247834)
> +++ head/sys/dev/drm2/drmP.h	Tue Mar  5 09:49:34 2013	(r247835)
> @@ -906,6 +906,7 @@ struct drm_device {
>  	struct drm_minor *control;		/**< Control node for card */
>  	struct drm_minor *primary;		/**< render type primary screen head */
> +	void		  *drm_ttm_bo;
>  	struct unrhdr	  *drw_unrhdr;
>  	/* RB tree of drawable infos */
>  	RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
> @@ -1302,10 +1303,14 @@ void drm_gem_release(struct drm_device *
>  int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
>  void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
> -int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
> -    struct vm_object **obj_res, int nprot);
> +int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
> +    vm_size_t size, struct vm_object **obj_res, int nprot);
>  void drm_gem_pager_dtr(void *obj);
> +struct ttm_bo_device;
> +int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
> +    vm_size_t size, struct vm_object **obj_res, int nprot);
> +
>  void drm_device_lock_mtx(struct drm_device *dev);
>  void drm_device_unlock_mtx(struct drm_device *dev);
>  int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
>
> Modified: head/sys/dev/drm2/drm_drv.c
> ==============================================================================
> --- head/sys/dev/drm2/drm_drv.c	Tue Mar  5 09:27:21 2013	(r247834)
> +++ head/sys/dev/drm2/drm_drv.c	Tue Mar  5 09:49:34 2013	(r247835)
> @@ -58,6 +58,8 @@ static int drm_load(struct drm_device *d
>  static void drm_unload(struct drm_device *dev);
>  static drm_pci_id_list_t *drm_find_description(int vendor, int device,
>      drm_pci_id_list_t *idlist);
> +static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
> +    vm_size_t size, struct vm_object **obj_res, int nprot);
>  static int
>  drm_modevent(module_t mod, int type, void *data)
> @@ -187,7 +189,7 @@ static struct cdevsw drm_cdevsw = {
>  	.d_ioctl =	drm_ioctl,
>  	.d_poll =	drm_poll,
>  	.d_mmap =	drm_mmap,
> -	.d_mmap_single = drm_gem_mmap_single,
> +	.d_mmap_single = drm_mmap_single,
>  	.d_name =	"drm",
>  	.d_flags =	D_TRACKCLOSE
>  };
> @@ -955,6 +957,23 @@ drm_add_busid_modesetting(struct drm_dev
>  	return (0);
>  }
> +static int
> +drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
> +    struct vm_object **obj_res, int nprot)
> +{
> +	struct drm_device *dev;
> +
> +	dev = drm_get_device_from_kdev(kdev);
> +	if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
> +		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
> +	} else if (dev->drm_ttm_bo != NULL) {
> +		return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
> +		    obj_res, nprot));
> +	} else {
> +		return (ENODEV);
> +	}
> +}
> +
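
A note for anyone picking up the Radeon work: as I read the dispatch
above, a TTM-backed driver only has to publish its struct ttm_bo_device
through the new drm_ttm_bo member at load time, while GEM drivers keep
going through the DRIVER_GEM feature bit. A minimal sketch of that
wiring; only drm_ttm_bo, dev_private and ttm_bo_mmap_single() come from
the tree, the mydrv_* names and the load-hook details are my own
invention:

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>

MALLOC_DEFINE(M_MYDRV, "mydrv", "example TTM consumer");

struct mydrv_softc {
	struct ttm_bo_device bdev;	/* embedded TTM device */
};

static int
mydrv_load(struct drm_device *dev, unsigned long flags)
{
	struct mydrv_softc *sc;

	sc = malloc(sizeof(*sc), M_MYDRV, M_WAITOK | M_ZERO);
	dev->dev_private = sc;
	/* ttm_bo_device_init(&sc->bdev, ...) would be called here. */

	/* Let the new drm_mmap_single() route to ttm_bo_mmap_single(). */
	dev->drm_ttm_bo = &sc->bdev;
	return (0);
}
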
>  #if DRM_LINUX
>  #include <sys/sysproto.h>
>
> Modified: head/sys/dev/drm2/drm_gem.c
> ==============================================================================
> --- head/sys/dev/drm2/drm_gem.c	Tue Mar  5 09:27:21 2013	(r247834)
> +++ head/sys/dev/drm2/drm_gem.c	Tue Mar  5 09:49:34 2013	(r247835)
> @@ -441,16 +441,12 @@ drm_gem_free_mmap_offset(struct drm_gem_
>  }
>  int
> -drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
> +drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
>      struct vm_object **obj_res, int nprot)
>  {
> -	struct drm_device *dev;
>  	struct drm_gem_object *gem_obj;
>  	struct vm_object *vm_obj;
> -	dev = drm_get_device_from_kdev(kdev);
> -	if ((dev->driver->driver_features & DRIVER_GEM) == 0)
> -		return (ENODEV);
>  	DRM_LOCK(dev);
>  	gem_obj = drm_gem_object_from_offset(dev, *offset);
>  	if (gem_obj == NULL) {
>
> Added: head/sys/dev/drm2/ttm/ttm_agp_backend.c
> ==============================================================================
> --- /dev/null	00:00:00 1970	(empty, because file is newly added)
> +++ head/sys/dev/drm2/ttm/ttm_agp_backend.c	Tue Mar  5 09:49:34 2013	(r247835)
> @@ -0,0 +1,145 @@
> +/**************************************************************************
> + *
> + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sub license, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial portions
> + * of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
> + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
> + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
> + * USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + **************************************************************************/
> +/*
> + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
> + *          Keith Packard.
> + */
> +
> +#include <sys/cdefs.h>
> +__FBSDID("$FreeBSD$");
> +
> +#include <dev/drm2/drmP.h>
> +#include <dev/drm2/ttm/ttm_module.h>
> +#include <dev/drm2/ttm/ttm_bo_driver.h>
> +#include <dev/drm2/ttm/ttm_page_alloc.h>
> +#ifdef TTM_HAS_AGP
> +#include <dev/drm2/ttm/ttm_placement.h>
> +
> +struct ttm_agp_backend {
> +	struct ttm_tt ttm;
> +	struct agp_memory *mem;
> +	device_t bridge;
> +};
> +
> +MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");
> +
> +static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
> +{
> +	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> +	struct drm_mm_node *node = bo_mem->mm_node;
> +	struct agp_memory *mem;
> +	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
> +	unsigned i;
> +
> +	mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
> +	if (unlikely(mem == NULL))
> +		return -ENOMEM;
> +
> +	mem->page_count = 0;
> +	for (i = 0; i < ttm->num_pages; i++) {
> +		vm_page_t page = ttm->pages[i];
> +
> +		if (!page)
> +			page = ttm->dummy_read_page;
> +
> +		mem->pages[mem->page_count++] = page;
> +	}
> +	agp_be->mem = mem;
> +
> +	mem->is_flushed = 1;
> +	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
> +
> +	ret = agp_bind_memory(mem, node->start);
> +	if (ret)
> +		pr_err("AGP Bind memory failed\n");
> +
> +	return ret;
> +}
> +
> +static int ttm_agp_unbind(struct ttm_tt *ttm)
> +{
> +	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> +
> +	if (agp_be->mem) {
> +		if (agp_be->mem->is_bound)
> +			return agp_unbind_memory(agp_be->mem);
> +		agp_free_memory(agp_be->mem);
> +		agp_be->mem = NULL;
> +	}
> +	return 0;
> +}
> +
> +static void ttm_agp_destroy(struct ttm_tt *ttm)
> +{
> +	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
> +
> +	if (agp_be->mem)
> +		ttm_agp_unbind(ttm);
> +	ttm_tt_fini(ttm);
> +	free(agp_be, M_TTM_AGP);
> +}
> +
> +static struct ttm_backend_func ttm_agp_func = {
> +	.bind = ttm_agp_bind,
> +	.unbind = ttm_agp_unbind,
> +	.destroy = ttm_agp_destroy,
> +};
> +
> +struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
> +				 device_t bridge,
> +				 unsigned long size, uint32_t page_flags,
> +				 vm_page_t dummy_read_page)
> +{
> +	struct ttm_agp_backend *agp_be;
> +
> +	agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);
> +
> +	agp_be->mem = NULL;
> +	agp_be->bridge = bridge;
> +	agp_be->ttm.func = &ttm_agp_func;
> +
> +	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
> +		return NULL;
> +	}
> +
> +	return &agp_be->ttm;
> +}
> +
> +int ttm_agp_tt_populate(struct ttm_tt *ttm)
> +{
> +	if (ttm->state != tt_unpopulated)
> +		return 0;
> +
> +	return ttm_pool_populate(ttm);
> +}
> +
> +void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
> +{
> +	ttm_pool_unpopulate(ttm);
> +}
> +
> +#endif
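
As a usage note (mine, not part of the commit): this AGP backend is
meant to be returned from a driver's ttm_tt_create hook, whose shape
can be read off the ttm_bo_add_ttm() caller in ttm_bo.c below. A
hedged sketch of such a hook; agp_find_device() is the stock FreeBSD
AGP bridge lookup, and the mydrv_ name is invented:

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/agp/agpvar.h>

static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
    uint32_t page_flags, vm_page_t dummy_read_page)
{
	device_t bridge;

	/* Find the AGP bridge, if the system has one. */
	bridge = agp_find_device();
	if (bridge == NULL)
		return (NULL);
	return (ttm_agp_tt_create(bdev, bridge, size, page_flags,
	    dummy_read_page));
}
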
>
> Added: head/sys/dev/drm2/ttm/ttm_bo.c
> ==============================================================================
> --- /dev/null	00:00:00 1970	(empty, because file is newly added)
> +++ head/sys/dev/drm2/ttm/ttm_bo.c	Tue Mar  5 09:49:34 2013	(r247835)
> @@ -0,0 +1,1820 @@
> +/**************************************************************************
> + *
> + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
> + * All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sub license, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial portions
> + * of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
> + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
> + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
> + * USE OR OTHER DEALINGS IN THE SOFTWARE.
> + *
> + **************************************************************************/
> +/*
> + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
> + */
> +
> +#include <sys/cdefs.h>
> +__FBSDID("$FreeBSD$");
> +
> +#include <dev/drm2/drmP.h>
> +#include <dev/drm2/ttm/ttm_module.h>
> +#include <dev/drm2/ttm/ttm_bo_driver.h>
> +#include <dev/drm2/ttm/ttm_placement.h>
> +
> +#define TTM_ASSERT_LOCKED(param)
> +#define TTM_DEBUG(fmt, arg...)
> +#define TTM_BO_HASH_ORDER 13
> +
> +static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
> +static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
> +static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
> +
> +MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
> +
> +static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
> +{
> +	int i;
> +
> +	for (i = 0; i <= TTM_PL_PRIV5; i++)
> +		if (flags & (1 << i)) {
> +			*mem_type = i;
> +			return 0;
> +		}
> +	return -EINVAL;
> +}
> +
> +static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
> +{
> +	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
> +
> +	printf("    has_type: %d\n", man->has_type);
> +	printf("    use_type: %d\n", man->use_type);
> +	printf("    flags: 0x%08X\n", man->flags);
> +	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
> +	printf("    size: %ju\n", (uintmax_t)man->size);
> +	printf("    available_caching: 0x%08X\n", man->available_caching);
> +	printf("    default_caching: 0x%08X\n", man->default_caching);
> +	if (mem_type != TTM_PL_SYSTEM)
> +		(*man->func->debug)(man, TTM_PFX);
> +}
> +
> +static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
> +					struct ttm_placement *placement)
> +{
> +	int i, ret, mem_type;
> +
> +	printf("No space for %p (%lu pages, %luK, %luM)\n",
> +	       bo, bo->mem.num_pages, bo->mem.size >> 10,
> +	       bo->mem.size >> 20);
> +	for (i = 0; i < placement->num_placement; i++) {
> +		ret = ttm_mem_type_from_flags(placement->placement[i],
> +						&mem_type);
> +		if (ret)
> +			return;
> +		printf("  placement[%d]=0x%08X (%d)\n",
> +		       i, placement->placement[i], mem_type);
> +		ttm_mem_type_debug(bo->bdev, mem_type);
> +	}
> +}
> +
> +#if 0
> +static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
> +    char *buffer)
> +{
> +
> +	return snprintf(buffer, PAGE_SIZE, "%lu\n",
> +			(unsigned long) atomic_read(&glob->bo_count));
> +}
> +#endif
> +
> +static inline uint32_t ttm_bo_type_flags(unsigned type)
> +{
> +	return 1 << (type);
> +}
> +
> +static void ttm_bo_release_list(struct ttm_buffer_object *bo)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	size_t acc_size = bo->acc_size;
> +
> +	MPASS(atomic_read(&bo->list_kref) == 0);
> +	MPASS(atomic_read(&bo->kref) == 0);
> +	MPASS(atomic_read(&bo->cpu_writers) == 0);
> +	MPASS(bo->sync_obj == NULL);
> +	MPASS(bo->mem.mm_node == NULL);
> +	MPASS(list_empty(&bo->lru));
> +	MPASS(list_empty(&bo->ddestroy));
> +
> +	if (bo->ttm)
> +		ttm_tt_destroy(bo->ttm);
> +	atomic_dec(&bo->glob->bo_count);
> +	if (bo->destroy)
> +		bo->destroy(bo);
> +	else {
> +		free(bo, M_TTM_BO);
> +	}
> +	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
> +}
> +
> +int
> +ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
> +{
> +	const char *wmsg;
> +	int flags, ret;
> +
> +	ret = 0;
> +	if (interruptible) {
> +		flags = PCATCH;
> +		wmsg = "ttbowi";
> +	} else {
> +		flags = 0;
> +		wmsg = "ttbowu";
> +	}
> +	while (!ttm_bo_is_reserved(bo)) {
> +		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
> +		if (ret != 0)
> +			break;
> +	}
> +	return (ret);
> +}
> +
> +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_mem_type_manager *man;
> +
> +	MPASS(ttm_bo_is_reserved(bo));
> +
> +	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
> +
> +		MPASS(list_empty(&bo->lru));
> +
> +		man = &bdev->man[bo->mem.mem_type];
> +		list_add_tail(&bo->lru, &man->lru);
> +		refcount_acquire(&bo->list_kref);
> +
> +		if (bo->ttm != NULL) {
> +			list_add_tail(&bo->swap, &bo->glob->swap_lru);
> +			refcount_acquire(&bo->list_kref);
> +		}
> +	}
> +}
> +
> +int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
> +{
> +	int put_count = 0;
> +
> +	if (!list_empty(&bo->swap)) {
> +		list_del_init(&bo->swap);
> +		++put_count;
> +	}
> +	if (!list_empty(&bo->lru)) {
> +		list_del_init(&bo->lru);
> +		++put_count;
> +	}
> +
> +	/*
> +	 * TODO: Add a driver hook to delete from
> +	 * driver-specific LRU's here.
> +	 */
> +
> +	return put_count;
> +}
> +
> +int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
> +			  bool interruptible,
> +			  bool no_wait, bool use_sequence, uint32_t sequence)
> +{
> +	int ret;
> +
> +	while (unlikely(atomic_read(&bo->reserved) != 0)) {
> +		/**
> +		 * Deadlock avoidance for multi-bo reserving.
> +		 */
> +		if (use_sequence && bo->seq_valid) {
> +			/**
> +			 * We've already reserved this one.
> +			 */
> +			if (unlikely(sequence == bo->val_seq))
> +				return -EDEADLK;
> +			/**
> +			 * Already reserved by a thread that will not back
> +			 * off for us. We need to back off.
> +			 */
> +			if (unlikely(sequence - bo->val_seq < (1 << 31)))
> +				return -EAGAIN;
> +		}
> +
> +		if (no_wait)
> +			return -EBUSY;
> +
> +		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
> +		if (unlikely(ret))
> +			return ret;
> +	}
> +
> +	atomic_set(&bo->reserved, 1);
> +	if (use_sequence) {
> +		/**
> +		 * Wake up waiters that may need to recheck for deadlock,
> +		 * if we decreased the sequence number.
> +		 */
> +		if (unlikely((bo->val_seq - sequence < (1 << 31))
> +			     || !bo->seq_valid))
> +			wakeup(bo);
> +
> +		bo->val_seq = sequence;
> +		bo->seq_valid = true;
> +	} else {
> +		bo->seq_valid = false;
> +	}
> +
> +	return 0;
> +}
> +
> +void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
> +			 bool never_free)
> +{
> +	u_int old;
> +
> +	old = atomic_fetchadd_int(&bo->list_kref, -count);
> +	if (old <= count) {
> +		if (never_free)
> +			panic("ttm_bo_ref_buf");
> +		ttm_bo_release_list(bo);
> +	}
> +}
> +
> +int ttm_bo_reserve(struct ttm_buffer_object *bo,
> +		   bool interruptible,
> +		   bool no_wait, bool use_sequence, uint32_t sequence)
> +{
> +	struct ttm_bo_global *glob = bo->glob;
> +	int put_count = 0;
> +	int ret;
> +
> +	mtx_lock(&glob->lru_lock);
> +	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
> +				    sequence);
> +	if (likely(ret == 0))
> +		put_count = ttm_bo_del_from_lru(bo);
> +	mtx_unlock(&glob->lru_lock);
> +
> +	ttm_bo_list_ref_sub(bo, put_count, true);
> +
> +	return ret;
> +}
> +
> +void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
> +{
> +	ttm_bo_add_to_lru(bo);
> +	atomic_set(&bo->reserved, 0);
> +	wakeup(bo);
> +}
> +
> +void ttm_bo_unreserve(struct ttm_buffer_object *bo)
> +{
> +	struct ttm_bo_global *glob = bo->glob;
> +
> +	mtx_lock(&glob->lru_lock);
> +	ttm_bo_unreserve_locked(bo);
> +	mtx_unlock(&glob->lru_lock);
> +}
> +
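
The reserve/unreserve pair above is the locking bracket the rest of
the file leans on, so a small caller sketch may help; this is my
reading, not code from the commit, and the function name is invented:

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>

static int
mydrv_query_placement(struct ttm_buffer_object *bo, uint32_t *placement)
{
	int ret;

	/* Sleep interruptibly; no multi-bo deadlock-avoidance sequence. */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret != 0)
		return (ret);	/* negative errno, as elsewhere in TTM */

	/* bo->mem is only stable while the reservation is held. */
	*placement = bo->mem.placement;

	ttm_bo_unreserve(bo);
	return (0);
}
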
> +/*
> + * Call bo->mutex locked.
> + */
> +static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_bo_global *glob = bo->glob;
> +	int ret = 0;
> +	uint32_t page_flags = 0;
> +
> +	TTM_ASSERT_LOCKED(&bo->mutex);
> +	bo->ttm = NULL;
> +
> +	if (bdev->need_dma32)
> +		page_flags |= TTM_PAGE_FLAG_DMA32;
> +
> +	switch (bo->type) {
> +	case ttm_bo_type_device:
> +		if (zero_alloc)
> +			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
> +	case ttm_bo_type_kernel:
> +		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
> +						      page_flags, glob->dummy_read_page);
> +		if (unlikely(bo->ttm == NULL))
> +			ret = -ENOMEM;
> +		break;
> +	case ttm_bo_type_sg:
> +		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
> +						      page_flags | TTM_PAGE_FLAG_SG,
> +						      glob->dummy_read_page);
> +		if (unlikely(bo->ttm == NULL)) {
> +			ret = -ENOMEM;
> +			break;
> +		}
> +		bo->ttm->sg = bo->sg;
> +		break;
> +	default:
> +		printf("[TTM] Illegal buffer object type\n");
> +		ret = -EINVAL;
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
> +				  struct ttm_mem_reg *mem,
> +				  bool evict, bool interruptible,
> +				  bool no_wait_gpu)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
> +	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
> +	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
> +	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
> +	int ret = 0;
> +
> +	if (old_is_pci || new_is_pci ||
> +	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
> +		ret = ttm_mem_io_lock(old_man, true);
> +		if (unlikely(ret != 0))
> +			goto out_err;
> +		ttm_bo_unmap_virtual_locked(bo);
> +		ttm_mem_io_unlock(old_man);
> +	}
> +
> +	/*
> +	 * Create and bind a ttm if required.
> +	 */
> +
> +	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
> +		if (bo->ttm == NULL) {
> +			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
> +			ret = ttm_bo_add_ttm(bo, zero);
> +			if (ret)
> +				goto out_err;
> +		}
> +
> +		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
> +		if (ret)
> +			goto out_err;
> +
> +		if (mem->mem_type != TTM_PL_SYSTEM) {
> +			ret = ttm_tt_bind(bo->ttm, mem);
> +			if (ret)
> +				goto out_err;
> +		}
> +
> +		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
> +			if (bdev->driver->move_notify)
> +				bdev->driver->move_notify(bo, mem);
> +			bo->mem = *mem;
> +			mem->mm_node = NULL;
> +			goto moved;
> +		}
> +	}
> +
> +	if (bdev->driver->move_notify)
> +		bdev->driver->move_notify(bo, mem);
> +
> +	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
> +	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
> +		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
> +	else if (bdev->driver->move)
> +		ret = bdev->driver->move(bo, evict, interruptible,
> +					 no_wait_gpu, mem);
> +	else
> +		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
> +
> +	if (ret) {
> +		if (bdev->driver->move_notify) {
> +			struct ttm_mem_reg tmp_mem = *mem;
> +			*mem = bo->mem;
> +			bo->mem = tmp_mem;
> +			bdev->driver->move_notify(bo, mem);
> +			bo->mem = *mem;
> +		}
> +
> +		goto out_err;
> +	}
> +
> +moved:
> +	if (bo->evicted) {
> +		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
> +		if (ret)
> +			printf("[TTM] Can not flush read caches\n");
> +		bo->evicted = false;
> +	}
> +
> +	if (bo->mem.mm_node) {
> +		bo->offset = (bo->mem.start << PAGE_SHIFT) +
> +		    bdev->man[bo->mem.mem_type].gpu_offset;
> +		bo->cur_placement = bo->mem.placement;
> +	} else
> +		bo->offset = 0;
> +
> +	return 0;
> +
> +out_err:
> +	new_man = &bdev->man[bo->mem.mem_type];
> +	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
> +		ttm_tt_unbind(bo->ttm);
> +		ttm_tt_destroy(bo->ttm);
> +		bo->ttm = NULL;
> +	}
> +
> +	return ret;
> +}
> +
> +/**
> + * Call bo::reserved.
> + * Will release GPU memory type usage on destruction.
> + * This is the place to put in driver specific hooks to release
> + * driver private resources.
> + * Will release the bo::reserved lock.
> + */
> +
> +static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
> +{
> +	if (bo->bdev->driver->move_notify)
> +		bo->bdev->driver->move_notify(bo, NULL);
> +
> +	if (bo->ttm) {
> +		ttm_tt_unbind(bo->ttm);
> +		ttm_tt_destroy(bo->ttm);
> +		bo->ttm = NULL;
> +	}
> +	ttm_bo_mem_put(bo, &bo->mem);
> +
> +	atomic_set(&bo->reserved, 0);
> +	wakeup(bo);
> +
> +	/*
> +	 * Since the final reference to this bo may not be dropped by
> +	 * the current task we have to put a memory barrier here to make
> +	 * sure the changes done in this function are always visible.
> +	 *
> +	 * This function only needs protection against the final kref_put.
> +	 */
> +	mb();
> +}
> +
> +static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_bo_global *glob = bo->glob;
> +	struct ttm_bo_driver *driver = bdev->driver;
> +	void *sync_obj = NULL;
> +	int put_count;
> +	int ret;
> +
> +	mtx_lock(&glob->lru_lock);
> +	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
> +
> +	mtx_lock(&bdev->fence_lock);
> +	(void) ttm_bo_wait(bo, false, false, true);
> +	if (!ret && !bo->sync_obj) {
> +		mtx_unlock(&bdev->fence_lock);
> +		put_count = ttm_bo_del_from_lru(bo);
> +
> +		mtx_unlock(&glob->lru_lock);
> +		ttm_bo_cleanup_memtype_use(bo);
> +
> +		ttm_bo_list_ref_sub(bo, put_count, true);
> +
> +		return;
> +	}
> +	if (bo->sync_obj)
> +		sync_obj = driver->sync_obj_ref(bo->sync_obj);
> +	mtx_unlock(&bdev->fence_lock);
> +
> +	if (!ret) {
> +		atomic_set(&bo->reserved, 0);
> +		wakeup(bo);
> +	}
> +
> +	refcount_acquire(&bo->list_kref);
> +	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
> +	mtx_unlock(&glob->lru_lock);
> +
> +	if (sync_obj) {
> +		driver->sync_obj_flush(sync_obj);
> +		driver->sync_obj_unref(&sync_obj);
> +	}
> +	taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
> +	    ((hz / 100) < 1) ? 1 : hz / 100);
> +}
> +
> +/**
> + * function ttm_bo_cleanup_refs_and_unlock
> + * If bo idle, remove from delayed- and lru lists, and unref.
> + * If not idle, do nothing.
> + *
> + * Must be called with lru_lock and reservation held, this function
> + * will drop both before returning.
> + *
> + * @interruptible         Any sleeps should occur interruptibly.
> + * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
> + */
> +
> +static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
> +					  bool interruptible,
> +					  bool no_wait_gpu)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_bo_driver *driver = bdev->driver;
> +	struct ttm_bo_global *glob = bo->glob;
> +	int put_count;
> +	int ret;
> +
> +	mtx_lock(&bdev->fence_lock);
> +	ret = ttm_bo_wait(bo, false, false, true);
> +
> +	if (ret && !no_wait_gpu) {
> +		void *sync_obj;
> +
> +		/*
> +		 * Take a reference to the fence and unreserve,
> +		 * at this point the buffer should be dead, so
> +		 * no new sync objects can be attached.
> +		 */
> +		sync_obj = driver->sync_obj_ref(bo->sync_obj);
> +		mtx_unlock(&bdev->fence_lock);
> +
> +		atomic_set(&bo->reserved, 0);
> +		wakeup(bo);
> +		mtx_unlock(&glob->lru_lock);
> +
> +		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
> +		driver->sync_obj_unref(&sync_obj);
> +		if (ret)
> +			return ret;
> +
> +		/*
> +		 * remove sync_obj with ttm_bo_wait, the wait should be
> +		 * finished, and no new wait object should have been added.
> +		 */
> +		mtx_lock(&bdev->fence_lock);
> +		ret = ttm_bo_wait(bo, false, false, true);
> +		mtx_unlock(&bdev->fence_lock);
> +		if (ret)
> +			return ret;
> +
> +		mtx_lock(&glob->lru_lock);
> +		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
> +
> +		/*
> +		 * We raced, and lost, someone else holds the reservation now,
> +		 * and is probably busy in ttm_bo_cleanup_memtype_use.
> +		 *
> +		 * Even if it's not the case, because we finished waiting any
> +		 * delayed destruction would succeed, so just return success
> +		 * here.
> +		 */
> +		if (ret) {
> +			mtx_unlock(&glob->lru_lock);
> +			return 0;
> +		}
> +	} else
> +		mtx_unlock(&bdev->fence_lock);
> +
> +	if (ret || unlikely(list_empty(&bo->ddestroy))) {
> +		atomic_set(&bo->reserved, 0);
> +		wakeup(bo);
> +		mtx_unlock(&glob->lru_lock);
> +		return ret;
> +	}
> +
> +	put_count = ttm_bo_del_from_lru(bo);
> +	list_del_init(&bo->ddestroy);
> +	++put_count;
> +
> +	mtx_unlock(&glob->lru_lock);
> +	ttm_bo_cleanup_memtype_use(bo);
> +
> +	ttm_bo_list_ref_sub(bo, put_count, true);
> +
> +	return 0;
> +}
> +
> +/**
> + * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
> + * encountered buffers.
> + */
> +
> +static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
> +{
> +	struct ttm_bo_global *glob = bdev->glob;
> +	struct ttm_buffer_object *entry = NULL;
> +	int ret = 0;
> +
> +	mtx_lock(&glob->lru_lock);
> +	if (list_empty(&bdev->ddestroy))
> +		goto out_unlock;
> +
> +	entry = list_first_entry(&bdev->ddestroy,
> +		struct ttm_buffer_object, ddestroy);
> +	refcount_acquire(&entry->list_kref);
> +
> +	for (;;) {
> +		struct ttm_buffer_object *nentry = NULL;
> +
> +		if (entry->ddestroy.next != &bdev->ddestroy) {
> +			nentry = list_first_entry(&entry->ddestroy,
> +				struct ttm_buffer_object, ddestroy);
> +			refcount_acquire(&nentry->list_kref);
> +		}
> +
> +		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
> +		if (!ret)
> +			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
> +							     !remove_all);
> +		else
> +			mtx_unlock(&glob->lru_lock);
> +
> +		if (refcount_release(&entry->list_kref))
> +			ttm_bo_release_list(entry);
> +		entry = nentry;
> +
> +		if (ret || !entry)
> +			goto out;
> +
> +		mtx_lock(&glob->lru_lock);
> +		if (list_empty(&entry->ddestroy))
> +			break;
> +	}
> +
> +out_unlock:
> +	mtx_unlock(&glob->lru_lock);
> +out:
> +	if (entry && refcount_release(&entry->list_kref))
> +		ttm_bo_release_list(entry);
> +	return ret;
> +}
> +
> +static void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
> +{
> +	struct ttm_bo_device *bdev = arg;
> +
> +	if (ttm_bo_delayed_delete(bdev, false)) {
> +		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
> +		    ((hz / 100) < 1) ? 1 : hz / 100);
> +	}
> +}
> +
> +static void ttm_bo_release(struct ttm_buffer_object *bo)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
> +
> +	rw_wlock(&bdev->vm_lock);
> +	if (likely(bo->vm_node != NULL)) {
> +		RB_REMOVE(ttm_bo_device_buffer_objects,
> +		    &bdev->addr_space_rb, bo);
> +		drm_mm_put_block(bo->vm_node);
> +		bo->vm_node = NULL;
> +	}
> +	rw_wunlock(&bdev->vm_lock);
> +	ttm_mem_io_lock(man, false);
> +	ttm_mem_io_free_vm(bo);
> +	ttm_mem_io_unlock(man);
> +	ttm_bo_cleanup_refs_or_queue(bo);
> +	if (refcount_release(&bo->list_kref))
> +		ttm_bo_release_list(bo);
> +}
> +
> +void ttm_bo_unref(struct ttm_buffer_object **p_bo)
> +{
> +	struct ttm_buffer_object *bo = *p_bo;
> +
> +	*p_bo = NULL;
> +	if (refcount_release(&bo->kref))
> +		ttm_bo_release(bo);
> +}
> +
> +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
> +{
> +	int pending;
> +
> +	taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
> +	if (pending)
> +		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
> +	return (pending);
> +}
> +
> +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
> +{
> +	if (resched) {
> +		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
> +		    ((hz / 100) < 1) ? 1 : hz / 100);
> +	}
> +}
> +
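
The lock/unlock pair above looks intended for suspend-style paths that
must quiesce the delayed-destroy task before touching the hardware. A
guessed usage pattern, not from the commit:

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>

static void
mydrv_quiesce(struct ttm_bo_device *bdev)
{
	int resched;

	/* Cancel the delayed-destroy task; remember if one was pending. */
	resched = ttm_bo_lock_delayed_workqueue(bdev);

	/* ... work that needs the delayed-destroy task idle ... */

	/* Requeue the task only if we cancelled a pending run. */
	ttm_bo_unlock_delayed_workqueue(bdev, resched);
}
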
> +static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
> +			bool no_wait_gpu)
> +{
> +	struct ttm_bo_device *bdev = bo->bdev;
> +	struct ttm_mem_reg evict_mem;
> +	struct ttm_placement placement;
> +	int ret = 0;
> +
> +	mtx_lock(&bdev->fence_lock);
> +	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
> +	mtx_unlock(&bdev->fence_lock);
> +
> +	if (unlikely(ret != 0)) {
> +		if (ret != -ERESTART) {
> +			printf("[TTM] Failed to expire sync object before buffer eviction\n");
> +		}
> +		goto out;
> +	}
> +
> +	MPASS(ttm_bo_is_reserved(bo));
> +
> +	evict_mem = bo->mem;
> +	evict_mem.mm_node = NULL;
> +	evict_mem.bus.io_reserved_vm = false;
> +	evict_mem.bus.io_reserved_count = 0;
> +
> +	placement.fpfn = 0;
>
> *** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
> _______________________________________________
> svn-src-all@freebsd.org mailing list
> http://lists.freebsd.org/mailman/listinfo/svn-src-all
> To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"