All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jerome Glisse <jglisse@redhat.com>
To: airlied@gmail.com
Cc: thellstrom@vmware.com, skeggsb@gmail.com,
	Jerome Glisse <jglisse@redhat.com>,
	dri-devel@lists.sf.net
Subject: [PATCH 1/9] drm/ttm: ttm_fault callback to allow driver to handle bo placement V2
Date: Mon, 22 Feb 2010 18:11:29 +0100	[thread overview]
Message-ID: <1266858699-23337-3-git-send-email-jglisse@redhat.com> (raw)
In-Reply-To: <1266858699-23337-2-git-send-email-jglisse@redhat.com>

On fault the driver is given the opportunity to perform any operation
it sees fit in order to place the buffer into a CPU visible area of
memory. This patch doesn't break TTM users; nouveau, vmwgfx and radeon
should keep working properly. A future patch will take advantage of this
infrastructure and remove the old path from TTM once the drivers are
converted.

V2: return VM_FAULT_NOPAGE if the callback returns -EBUSY or -ERESTARTSYS

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c      |    3 +-
 drivers/gpu/drm/ttm/ttm_bo_util.c |   92 +++++++++++++++++--------------------
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |   56 ++++++++++++----------
 include/drm/ttm/ttm_bo_api.h      |    1 +
 include/drm/ttm/ttm_bo_driver.h   |   30 ++++++++++++
 5 files changed, 105 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c7320ce..28f3fcf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1581,7 +1581,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	if (!bdev->dev_mapping)
 		return;
-
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, &bo->mem);
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3f72fe1..10c5fc6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -84,26 +84,36 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
+	struct ttm_bus_placement pl;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
-		return ret;
+	if (bdev->driver->io_mem_reserve) {
+		ret = bdev->driver->io_mem_reserve(bdev, mem, &pl);
+		if (unlikely(ret != 0)) {
+			return ret;
+		}
+	} else {
+		ret = ttm_bo_pci_offset(bdev, mem, &pl.base, &pl.offset, &pl.size);
+		if (unlikely(ret != 0) || pl.size == 0) {
+			return ret;
+		}
+		pl.is_iomem = (pl.size != 0);
+	}
 
 	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+		addr = (void *)(pl.base + pl.offset);
 	else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(pl.base + pl.offset, pl.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(pl.base + pl.offset, pl.size);
+		if (!addr) {
+			if (bdev->driver->io_mem_free)
+				bdev->driver->io_mem_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
@@ -118,6 +128,8 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
 		iounmap(virtual);
+	if (bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -440,13 +452,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_bus_placement pl;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -455,16 +466,24 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
-	if (ret)
-		return ret;
-	if (bus_size == 0) {
+	if (bo->bdev->driver->io_mem_reserve) {
+		ret = bo->bdev->driver->io_mem_reserve(bo->bdev, &bo->mem, &pl);
+		if (unlikely(ret != 0)) {
+			return ret;
+		}
+	} else {
+		ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &pl.base, &pl.offset, &pl.size);
+		if (unlikely(ret != 0)) {
+			return ret;
+		}
+		pl.is_iomem = (pl.size != 0);
+	}
+	if (!pl.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		pl.offset += start_page << PAGE_SHIFT;
+		pl.size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, pl.base, pl.offset, pl.size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -476,6 +495,8 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		if (map->bo->bdev->driver->io_mem_free)
+			map->bo->bdev->driver->io_mem_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -493,35 +514,6 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8..fe4ac95 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
+	struct ttm_bus_placement pl;
 	unsigned long page_offset;
 	unsigned long page_last;
 	unsigned long pfn;
@@ -84,7 +82,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	int ret;
 	int i;
-	bool is_iomem;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
 
@@ -104,11 +101,33 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (bdev->driver->fault_reserve_notify)
 		bdev->driver->fault_reserve_notify(bo);
 
+	if (bdev->driver->fault_reserve) {
+		ret = bdev->driver->fault_reserve(bo, &pl);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			set_need_resched();
+		case -ERESTARTSYS:
+			retval = VM_FAULT_NOPAGE;
+			goto out_unlock;
+		default:
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	} else {
+		ret = ttm_bo_pci_offset(bdev, &bo->mem, &pl.base, &pl.offset, &pl.size);
+		if (unlikely(ret != 0)) {
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+		pl.is_iomem = (pl.size != 0);
+	}
+
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
 	spin_lock(&bo->lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
@@ -122,20 +141,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		spin_unlock(&bo->lock);
 
 
-	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-				&bus_size);
-	if (unlikely(ret != 0)) {
-		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
-	}
-
-	is_iomem = (bus_size != 0);
-
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
 	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
-
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
 		goto out_unlock;
@@ -154,8 +163,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * vma->vm_page_prot when the object changes caching policy, with
 	 * the correct locks held.
 	 */
-
-	if (is_iomem) {
+	if (pl.is_iomem) {
 		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 						vma->vm_page_prot);
 	} else {
@@ -169,12 +177,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
 	 */
-
 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-		if (is_iomem)
-			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-			    page_offset;
+		if (pl.is_iomem)
+			pfn = ((pl.base + pl.offset) >> PAGE_SHIFT) + page_offset;
 		else {
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
@@ -191,14 +196,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
 		 */
-
 		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
 			break;
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 			goto out_unlock;
-
 		}
 
 		address += PAGE_SIZE;
@@ -221,9 +224,10 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
+	if (bo->bdev->driver->io_mem_free)
+		bo->bdev->driver->io_mem_free(bo->bdev, &bo->mem);
 	ttm_bo_unref(&bo);
 	vma->vm_private_data = NULL;
 }
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 81eb9f4..1b38c67 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -274,6 +274,7 @@ struct ttm_bo_kmap_obj {
 		ttm_bo_map_kmap         = 3,
 		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
 	} bo_kmap_type;
+	struct ttm_buffer_object *bo;
 };
 
 /**
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4c4e0f8..923fecf 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -240,6 +240,23 @@ struct ttm_mem_type_manager {
 };
 
 /**
+ * struct ttm_bus_placement
+ *
+ * @base:		bus base address
+ * @is_iomem:		is this io memory ?
+ * @size:		size in byte
+ * @offset:		offset from the base address
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+	unsigned long	base;
+	unsigned long	size;
+	unsigned long	offset;
+	bool		is_iomem;
+};
+
+/**
  * struct ttm_bo_driver
  *
  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
@@ -358,6 +375,19 @@ struct ttm_bo_driver {
 	 * notify the driver that we're about to swap out this bo
 	 */
 	void (*swap_notify) (struct ttm_buffer_object *bo);
+
+	/**
+	 * Driver callback on bo fault, driver is responsible to fill the
+	 * bus placement and has the opportunity to move the buffer into
+	 * visible space.
+	 */
+	int (*fault_reserve)(struct ttm_buffer_object *bo, struct ttm_bus_placement *pl);
+	/**
+	 * Driver callback on when mapping io memory (for bo_move_memcpy for instance).
+	 * TTM will take care to call io_mem_free whenever the mapping is not use anymore.
+	 */
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, struct ttm_bus_placement *pl);
+	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
 
 /**
-- 
1.6.6


------------------------------------------------------------------------------
Download Intel® Parallel Studio Eval
Try the new software tools for yourself. Speed compiling, find bugs
proactively, and fine-tune applications for parallel performance.
See why Intel Parallel Studio got high marks during beta.
http://p.sf.net/sfu/intel-sw-dev
--

  reply	other threads:[~2010-02-22 17:11 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-02-22 17:11 Unmappable VRAM patchset V3 Jerome Glisse
2010-02-22 17:11 ` [PATCH 1/9] drm/ttm: add ttm_fault callback to allow driver to handle bo placement Jerome Glisse
2010-02-22 17:11   ` Jerome Glisse [this message]
2010-02-22 17:11     ` [PATCH 2/9] drm/radeon/kms: add support for new fault callback Jerome Glisse
2010-02-22 17:11       ` [PATCH 2/9] drm/radeon/kms: add support for new fault callback V2 Jerome Glisse
2010-02-22 17:11         ` [PATCH 3/9] drm/nouveau/kms: add support for new TTM fault callback Jerome Glisse
2010-02-22 17:11           ` [PATCH 4/9] drm/vmwgfx: " Jerome Glisse
2010-02-22 17:11             ` [PATCH 5/9] drm/radeon/kms: don't initialize TTM io memory manager field Jerome Glisse
2010-02-22 17:11               ` [PATCH 6/9] drm/nouveau/kms: " Jerome Glisse
2010-02-22 17:11                 ` [PATCH 7/9] drm/vmwgfx: " Jerome Glisse
2010-02-22 17:11                   ` [PATCH 8/9] drm/ttm: remove io_ field from TTM Jerome Glisse
2010-02-22 17:11                     ` [PATCH 8/9] drm/ttm: remove io_ field from TTM V2 Jerome Glisse
2010-02-22 17:11                       ` [PATCH 9/9] drm/radeon/kms: enable use of unmappable VRAM Jerome Glisse
2010-03-17 12:57     ` [PATCH 1/9] drm/ttm: ttm_fault callback to allow driver to handle bo placement V2 Thomas Hellstrom
2010-02-22 17:30 ` Unmappable VRAM patchset V3 Thomas Hellstrom
2010-02-22 19:09   ` Jerome Glisse
     [not found]     ` <4B82E1E4.40909@vmware.com>
2010-02-23  9:59       ` Jerome Glisse
2010-02-23 13:05         ` Thomas Hellstrom
2010-02-24  9:57           ` Jerome Glisse
2010-02-24 12:37             ` Thomas Hellstrom
2010-02-24 15:58               ` Jerome Glisse
2010-02-24 17:04                 ` Thomas Hellstrom
2010-02-25  9:39                   ` Jerome Glisse
2010-02-23 14:40 Unmappable VRAM patchset V4 Jerome Glisse
2010-02-23 14:40 ` [PATCH 1/9] drm/ttm: ttm_fault callback to allow driver to handle bo placement V2 Jerome Glisse
2010-02-25 17:01 Unmappable VRAM patchset V4 Jerome Glisse
2010-02-25 17:01 ` [PATCH 1/9] drm/ttm: ttm_fault callback to allow driver to handle bo placement V2 Jerome Glisse

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1266858699-23337-3-git-send-email-jglisse@redhat.com \
    --to=jglisse@redhat.com \
    --cc=airlied@gmail.com \
    --cc=dri-devel@lists.sf.net \
    --cc=skeggsb@gmail.com \
    --cc=thellstrom@vmware.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.