* [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC
@ 2018-02-09  7:30 Roger He
  2018-02-09  7:30 ` [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean Roger He
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Roger He @ 2018-02-09  7:30 UTC (permalink / raw)
  To: dri-devel; +Cc: Roger He, Christian.Koenig

Set TTM_OPT_FLAG_FORCE_ALLOC when we are servicing the page
fault routine.

In ttm_mem_global_reserve, always allow the GTT page reservation
when called from the page fault routine: the page fault path has
already grabbed the system memory, so allowing this exception is
harmless. Otherwise it would trigger the OOM killer.

v2: keep original behavior except ttm bo with flag no_retry
v3: forward the ttm_operation_ctx to ttm_mem_global_reserve

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c          |  6 ++++--
 drivers/gpu/drm/ttm/ttm_memory.c         | 18 ++++++++++++++----
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  1 -
 include/drm/ttm/ttm_bo_api.h             |  4 +++-
 4 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 716e724..313398a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -224,7 +224,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 	} else {
-		struct ttm_operation_ctx ctx = {
+		struct ttm_operation_ctx ttm_opt_ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false
 		};
@@ -233,8 +233,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 
+		if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+			ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;
 		/* Allocate all page at once, most common usage */
-		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
+		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ttm_opt_ctx)) {
 			ret = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index aa0c381..154719b 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -471,7 +471,8 @@ EXPORT_SYMBOL(ttm_mem_global_free);
 
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
-				  uint64_t amount, bool reserve)
+				  uint64_t amount, bool reserve,
+				  struct ttm_operation_ctx *ctx)
 {
 	uint64_t limit;
 	int ret = -ENOMEM;
@@ -479,6 +480,15 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 	struct ttm_mem_zone *zone;
 
 	spin_lock(&glob->lock);
+	/*
+	 * to cover two special cases:
+	 * a. if serving page_fault allow reservation anyway since
+	 * it already allocated system pages. Otherwise it will trigger OOM.
+	 * b. if serving suspend, allow reservation anyway as well.
+	 */
+	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
+		goto force_reserve;
+
 	for (i = 0; i < glob->num_zones; ++i) {
 		zone = glob->zones[i];
 		if (single_zone && zone != single_zone)
@@ -491,6 +501,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 			goto out_unlock;
 	}
 
+force_reserve:
 	if (reserve) {
 		for (i = 0; i < glob->num_zones; ++i) {
 			zone = glob->zones[i];
@@ -516,9 +527,8 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
 {
 	int count = TTM_MEMORY_ALLOC_RETRIES;
 
-	while (unlikely(ttm_mem_global_reserve(glob,
-					       single_zone,
-					       memory, true)
+	while (unlikely(ttm_mem_global_reserve(glob, single_zone,
+					       memory, true, ctx)
 			!= 0)) {
 		if (ctx->no_wait_gpu)
 			return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b122f6e..354e0e1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -944,7 +944,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	i = 0;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
 		goto skip_huge;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 872ff6c..2142639 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -278,7 +278,9 @@ struct ttm_operation_ctx {
 };
 
 /* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT	0x1
+#define TTM_OPT_FLAG_ALLOW_RES_EVICT		0x1
+/* when serving page fault or suspend, allow alloc anyway */
+#define TTM_OPT_FLAG_FORCE_ALLOC		0x2
 
 /**
  * ttm_bo_reference - reference a struct ttm_buffer_object
-- 
2.7.4


* [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean
  2018-02-09  7:30 [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC Roger He
@ 2018-02-09  7:30 ` Roger He
  2018-02-09  9:38   ` Christian König
  2018-02-09  7:30 ` [PATCH 3/3] drm/ttm: check if free mem space is under the lower limit Roger He
  2018-02-09  8:48 ` [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC Christian König
  2 siblings, 1 reply; 6+ messages in thread
From: Roger He @ 2018-02-09  7:30 UTC (permalink / raw)
  To: dri-devel; +Cc: Roger He, Christian.Koenig

If it is true, allocate TTM pages regardless of the zone global
memory accounting limit. For example during suspend, we should avoid
a TTM memory allocation failure making the whole process fail.

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a907311..685baad 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type)
+			unsigned mem_type, bool force_alloc)
 {
-	struct ttm_operation_ctx ctx = { false, false };
+	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
 	struct dma_fence *fence;
 	int ret;
 	unsigned i;
 
+	if (force_alloc)
+		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
@@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		while (!list_empty(&man->lru[i])) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
+						  &ttm_opt_ctx);
 			if (ret)
 				return ret;
 			spin_lock(&glob->lru_lock);
@@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ret = ttm_bo_force_list_clean(bdev, mem_type);
+		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
 		if (ret) {
 			pr_err("Cleanup eviction failed\n");
 			return ret;
@@ -1433,7 +1436,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
-- 
2.7.4


* [PATCH 3/3] drm/ttm: check if free mem space is under the lower limit
  2018-02-09  7:30 [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC Roger He
  2018-02-09  7:30 ` [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean Roger He
@ 2018-02-09  7:30 ` Roger He
  2018-02-09  8:48 ` [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC Christian König
  2 siblings, 0 replies; 6+ messages in thread
From: Roger He @ 2018-02-09  7:30 UTC (permalink / raw)
  To: dri-devel; +Cc: Roger He, Christian.Koenig

The free memory space and the lower limit both consist of two parts:
system memory and swap space (the concrete limit is quantified below).

The OOM triggered by TTM happens as follows: first the swap space
fills up with swapped-out pages, then system memory is also filled
up with TTM pages, and from that point any memory allocation request
runs into the OOM killer.

The check covers two cases:
a. if there is no swap disk at all, or free swap space is under the
   swap memory limit but available system memory is bigger than the
   system memory limit, allow the TTM allocation;

b. if the available system memory is less than the system memory
   limit but free swap space is bigger than the swap memory limit,
   allow the TTM allocation.
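
For reference, assuming the usual 4 KiB page size (the limit is kept
in pages and set in ttm_mem_global_init() further down):

   256 << 8         = 65536 pages, i.e. roughly 256 MiB of swap headroom
   si.totalram >> 2 = a quarter of system RAM, consistent with each
                      zone->emer_mem
   lower_mem_limit  = 65536 + (si.totalram >> 2) pages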

v2: merge two memory limit(swap and system) into one
v3: keep original behavior except ttm_opt_ctx->flags with
    TTM_OPT_FLAG_FORCE_ALLOC
v4: always set force_alloc as ttm_opt_ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_memory.c         | 33 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/ttm/ttm_page_alloc.c     | 20 ++++++++++++-------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 30 +++++++++++++++++------------
 include/drm/ttm/ttm_memory.h             |  5 +++++
 4 files changed, 69 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 154719b..7e8d3ec 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -36,6 +36,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
@@ -375,6 +376,11 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 
 	si_meminfo(&si);
 
+	/* lower limit of swap space and 256MB is enough */
+	glob->lower_mem_limit = 256 << 8;
+	/* lower limit of ram and keep consistent with each zone->emer_mem */
+	glob->lower_mem_limit += si.totalram >> 2;
+
 	ret = ttm_mem_init_kernel_zone(glob, &si);
 	if (unlikely(ret != 0))
 		goto out_no_zone;
@@ -469,6 +475,33 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
 }
 EXPORT_SYMBOL(ttm_mem_global_free);
 
+/*
+ * check if the available mem is under lower memory limit
+ *
+ * a. if no swap disk at all or free swap space is under swap_mem_limit
+ * but available system mem is bigger than sys_mem_limit, allow TTM
+ * allocation;
+ *
+ * b. if the available system mem is less than sys_mem_limit but free
+ * swap disk is bigger than swap_mem_limit, allow TTM allocation.
+ */
+bool
+ttm_check_under_lowerlimit(struct ttm_mem_global *glob, bool force_alloc)
+{
+	bool ret = false;
+	uint64_t available;
+
+	if (force_alloc)
+		return false;
+
+	available = get_nr_swap_pages() + si_mem_available();
+	if (available < glob->lower_mem_limit)
+		ret = true;
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_check_under_lowerlimit);
+
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
 				  uint64_t amount, bool reserve,
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 5edcd89..a5bfdc2 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1094,7 +1094,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	unsigned i;
+	unsigned i, unpopulate_count = 0;
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
@@ -1102,17 +1102,19 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 			    ttm->caching_state);
-	if (unlikely(ret != 0)) {
-		ttm_pool_unpopulate_helper(ttm, 0);
-		return ret;
-	}
+	if (unlikely(ret != 0))
+		goto error_populate;
+
+	if (ttm_check_under_lowerlimit(mem_glob,
+				       ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC))
+		goto error_populate;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_pool_unpopulate_helper(ttm, i);
-			return -ENOMEM;
+			unpopulate_count = i;
+			goto error_populate;
 		}
 	}
 
@@ -1126,6 +1128,10 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 
 	ttm->state = tt_unbound;
 	return 0;
+
+error_populate:
+	ttm_pool_unpopulate_helper(ttm, unpopulate_count);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_pool_populate);
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 354e0e1..5a86715 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -934,6 +934,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	struct dma_pool *pool;
 	struct dma_page *d_page;
 	enum pool_type type;
+	bool force_alloc = true;
 	unsigned i;
 	int ret;
 
@@ -943,6 +944,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	i = 0;
 
+	force_alloc = ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC;
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -964,12 +966,13 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 		if (!d_page)
 			break;
 
+		if (ttm_check_under_lowerlimit(mem_glob, force_alloc))
+			goto error_populate;
+
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
-		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (unlikely(ret != 0))
+			goto error_populate;
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
@@ -996,17 +999,16 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 
 	while (num_pages) {
 		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (!d_page) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (!d_page)
+			goto error_populate;
+
+		if (ttm_check_under_lowerlimit(mem_glob, force_alloc))
+			goto error_populate;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
-		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
-			return -ENOMEM;
-		}
+		if (unlikely(ret != 0))
+			goto error_populate;
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		++i;
@@ -1023,6 +1025,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 
 	ttm->state = tt_unbound;
 	return 0;
+
+error_populate:
+	ttm_dma_unpopulate(ttm_dma, dev);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 8936285..3aa30d6 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -49,6 +49,8 @@
  * @work: The workqueue callback for the shrink queue.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
+ * @lower_mem_limit: include lower limit of swap space and lower limit of
+ * system memory.
  * @zones: Array of pointers to accounting zones.
  * @num_zones: Number of populated entries in the @zones array.
  * @zone_kernel: Pointer to the kernel zone.
@@ -67,6 +69,7 @@ struct ttm_mem_global {
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
 	spinlock_t lock;
+	uint64_t lower_mem_limit;
 	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
 	unsigned int num_zones;
 	struct ttm_mem_zone *zone_kernel;
@@ -90,4 +93,6 @@ extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);
 extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
+extern bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
+					bool force_alloc);
 #endif
-- 
2.7.4


* Re: [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC
  2018-02-09  7:30 [PATCH 1/3] drm/ttm: add bit flag TTM_OPT_FLAG_FORCE_ALLOC Roger He
  2018-02-09  7:30 ` [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean Roger He
  2018-02-09  7:30 ` [PATCH 3/3] drm/ttm: check if free mem space is under the lower limit Roger He
@ 2018-02-09  8:48 ` Christian König
  2 siblings, 0 replies; 6+ messages in thread
From: Christian König @ 2018-02-09  8:48 UTC (permalink / raw)
  To: Roger He, dri-devel

On 09.02.2018 at 08:30, Roger He wrote:
> Set TTM_OPT_FLAG_FORCE_ALLOC when we are servicing the page
> fault routine.
>
> In ttm_mem_global_reserve, always allow the GTT page reservation
> when called from the page fault routine: the page fault path has
> already grabbed the system memory, so allowing this exception is
> harmless. Otherwise it would trigger the OOM killer.
>
> v2: keep original behavior except ttm bo with flag no_retry
> v3: forward the ttm_operation_ctx to ttm_mem_global_reserve
>
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo_vm.c          |  6 ++++--
>   drivers/gpu/drm/ttm/ttm_memory.c         | 18 ++++++++++++++----
>   drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  1 -
>   include/drm/ttm/ttm_bo_api.h             |  4 +++-
>   4 files changed, 21 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 716e724..313398a 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -224,7 +224,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
>   		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
>   						cvma.vm_page_prot);
>   	} else {
> -		struct ttm_operation_ctx ctx = {
> +		struct ttm_operation_ctx ttm_opt_ctx = {

Drop all those variable renames; if we really want to do this we should
have a separate patch.

>   			.interruptible = false,
>   			.no_wait_gpu = false
>   		};
> @@ -233,8 +233,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
>   		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
>   						cvma.vm_page_prot);
>   
> +		if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
> +			ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;

Just set that flag unconditionally for the page fault context.

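For illustration, setting it unconditionally in the fault handler would
look roughly like this (a sketch of the suggestion, not the final patch):

	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = TTM_OPT_FLAG_FORCE_ALLOC
	};
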
>   		/* Allocate all page at once, most common usage */
> -		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
> +		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ttm_opt_ctx)) {
>   			ret = VM_FAULT_OOM;
>   			goto out_io_unlock;
>   		}
> diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
> index aa0c381..154719b 100644
> --- a/drivers/gpu/drm/ttm/ttm_memory.c
> +++ b/drivers/gpu/drm/ttm/ttm_memory.c
> @@ -471,7 +471,8 @@ EXPORT_SYMBOL(ttm_mem_global_free);
>   
>   static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
>   				  struct ttm_mem_zone *single_zone,
> -				  uint64_t amount, bool reserve)
> +				  uint64_t amount, bool reserve,
> +				  struct ttm_operation_ctx *ctx)
>   {
>   	uint64_t limit;
>   	int ret = -ENOMEM;
> @@ -479,6 +480,15 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
>   	struct ttm_mem_zone *zone;
>   
>   	spin_lock(&glob->lock);
> +	/*
> +	 * to cover two special cases:
> +	 * a. if serving page_fault allow reservation anyway since
> +	 * it already allocated system pages. Otherwise it will trigger OOM.
> +	 * b. if serving suspend, allow reservation anyway as well.
> +	 */
> +	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
> +		goto force_reserve;
> +

Oh, you want to skip the zone check as well? Not sure if that is a good 
idea or not.

Please drop that hunk for now and only skip the new limit when setting 
the ctx flag.

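In other words, something like this in the populate path (an illustrative
reading that reuses the helper patch 3 introduces, not code from this
series):

	/* ttm_mem_global_reserve() keeps its zone checks; only the new
	 * lower limit is bypassed when the context forces the allocation */
	if (ttm_check_under_lowerlimit(mem_glob,
				       ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC))
		return -ENOMEM;
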
>   	for (i = 0; i < glob->num_zones; ++i) {
>   		zone = glob->zones[i];
>   		if (single_zone && zone != single_zone)
> @@ -491,6 +501,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
>   			goto out_unlock;
>   	}
>   
> +force_reserve:
>   	if (reserve) {
>   		for (i = 0; i < glob->num_zones; ++i) {
>   			zone = glob->zones[i];
> @@ -516,9 +527,8 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
>   {
>   	int count = TTM_MEMORY_ALLOC_RETRIES;
>   
> -	while (unlikely(ttm_mem_global_reserve(glob,
> -					       single_zone,
> -					       memory, true)
> +	while (unlikely(ttm_mem_global_reserve(glob, single_zone,
> +					       memory, true, ctx)
>   			!= 0)) {
>   		if (ctx->no_wait_gpu)
>   			return -ENOMEM;
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index b122f6e..354e0e1 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -944,7 +944,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
>   	i = 0;
>   
>   	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
> -

Unrelated whitespace change, please drop that.

Regards,
Christian.

>   #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>   	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
>   		goto skip_huge;
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 872ff6c..2142639 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -278,7 +278,9 @@ struct ttm_operation_ctx {
>   };
>   
>   /* Allow eviction of reserved BOs */
> -#define TTM_OPT_FLAG_ALLOW_RES_EVICT	0x1
> +#define TTM_OPT_FLAG_ALLOW_RES_EVICT		0x1
> +/* when serving page fault or suspend, allow alloc anyway */
> +#define TTM_OPT_FLAG_FORCE_ALLOC		0x2
>   
>   /**
>    * ttm_bo_reference - reference a struct ttm_buffer_object


* Re: [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean
  2018-02-09  7:30 ` [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean Roger He
@ 2018-02-09  9:38   ` Christian König
  2018-02-09  9:43     ` He, Roger
  0 siblings, 1 reply; 6+ messages in thread
From: Christian König @ 2018-02-09  9:38 UTC (permalink / raw)
  To: Roger He, dri-devel

On 09.02.2018 at 08:30, Roger He wrote:
> If it is true, allocate TTM pages regardless of the zone global
> memory accounting limit. For example during suspend, we should avoid
> a TTM memory allocation failure making the whole process fail.
>
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++-----
>   1 file changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index a907311..685baad 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
>   EXPORT_SYMBOL(ttm_bo_create);
>   
>   static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
> -				   unsigned mem_type)
> +			unsigned mem_type, bool force_alloc)
>   {
> -	struct ttm_operation_ctx ctx = { false, false };
> +	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
>   	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>   	struct ttm_bo_global *glob = bdev->glob;
>   	struct dma_fence *fence;
>   	int ret;
>   	unsigned i;
>   
> +	if (force_alloc)
> +		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;

Just unconditionally set that flag here as well. ttm_bo_force_list_clean()
is only called on two occasions:
1. By ttm_bo_evict_mm() during suspend.
2. By ttm_bo_clean_mm() when the driver unloads.

In both cases we absolutely don't want any memory allocation failure.
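
A rough sketch of what that amounts to (illustrative only, not the
committed code):

	static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					   unsigned mem_type)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};
		/* ... rest of the eviction loop unchanged ... */
	}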

Christian.

>   	/*
>   	 * Can't use standard list traversal since we're unlocking.
>   	 */
> @@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
> +						  &ttm_opt_ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
> @@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   
>   	ret = 0;
>   	if (mem_type > 0) {
> -		ret = ttm_bo_force_list_clean(bdev, mem_type);
> +		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
>   		if (ret) {
>   			pr_err("Cleanup eviction failed\n");
>   			return ret;
> @@ -1433,7 +1436,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   		return 0;
>   	}
>   
> -	return ttm_bo_force_list_clean(bdev, mem_type);
> +	return ttm_bo_force_list_clean(bdev, mem_type, true);
>   }
>   EXPORT_SYMBOL(ttm_bo_evict_mm);
>   


* RE: [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean
  2018-02-09  9:38   ` Christian König
@ 2018-02-09  9:43     ` He, Roger
  0 siblings, 0 replies; 6+ messages in thread
From: He, Roger @ 2018-02-09  9:43 UTC (permalink / raw)
  To: Koenig, Christian, dri-devel

OK, please ignore patch 3 since I have some minor changes.
I will send it out later.

Thanks
Roger(Hongbo.He)
-----Original Message-----
From: Koenig, Christian 
Sent: Friday, February 09, 2018 5:38 PM
To: He, Roger <Hongbo.He@amd.com>; dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 2/3] drm/ttm: add input parameter force_alloc for ttm_bo_force_list_clean

On 09.02.2018 at 08:30, Roger He wrote:
> If it is true, allocate TTM pages regardless of the zone global
> memory accounting limit. For example during suspend, we should avoid
> a TTM memory allocation failure making the whole process fail.
>
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++-----
>   1 file changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c 
> b/drivers/gpu/drm/ttm/ttm_bo.c index a907311..685baad 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
>   EXPORT_SYMBOL(ttm_bo_create);
>   
>   static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
> -				   unsigned mem_type)
> +			unsigned mem_type, bool force_alloc)
>   {
> -	struct ttm_operation_ctx ctx = { false, false };
> +	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
>   	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>   	struct ttm_bo_global *glob = bdev->glob;
>   	struct dma_fence *fence;
>   	int ret;
>   	unsigned i;
>   
> +	if (force_alloc)
> +		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;

Just unconditionally set that flag here as well. ttm_bo_force_list_clean() is only called on two occasions:
1. By ttm_bo_evict_mm() during suspend.
2. By ttm_bo_clean_mm() when the driver unloads.

In both cases we absolutely don't want any memory allocation failure.

Christian.

>   	/*
>   	 * Can't use standard list traversal since we're unlocking.
>   	 */
> @@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
> +						  &ttm_opt_ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
> @@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, 
> unsigned mem_type)
>   
>   	ret = 0;
>   	if (mem_type > 0) {
> -		ret = ttm_bo_force_list_clean(bdev, mem_type);
> +		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
>   		if (ret) {
>   			pr_err("Cleanup eviction failed\n");
>   			return ret;
> @@ -1433,7 +1436,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   		return 0;
>   	}
>   
> -	return ttm_bo_force_list_clean(bdev, mem_type);
> +	return ttm_bo_force_list_clean(bdev, mem_type, true);
>   }
>   EXPORT_SYMBOL(ttm_bo_evict_mm);
>   
