* [PATCH] drm/ttm: fix DMA32 handling in the global page pool
@ 2020-11-17 15:53 Christian König
2020-11-19 9:21 ` Christian König
0 siblings, 1 reply; 3+ messages in thread
From: Christian König @ 2020-11-17 15:53 UTC (permalink / raw)
To: dri-devel
When we have mixed DMA32 and non DMA32 devices in one system
it could otherwise happen that the DMA32 device gets pages
it can't work with.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
drivers/gpu/drm/ttm/ttm_pool.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1b96780b4989..5455b2044759 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -63,6 +63,9 @@ static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -290,8 +293,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->use_dma32)
+ return &global_dma32_write_combined[order];
+
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->use_dma32)
+ return &global_dma32_uncached[order];
+
return &global_uncached[order];
default:
break;
@@ -570,6 +579,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_puts(m, "uc\t:");
ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -640,6 +654,11 @@ int ttm_pool_mgr_init(unsigned long num_pages)
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+
+ ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_dma32_uncached[i], NULL,
+ ttm_uncached, i);
}
mm_shrinker.count_objects = ttm_pool_shrinker_count;
@@ -660,6 +679,9 @@ void ttm_pool_mgr_fini(void)
for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
+
+ ttm_pool_type_fini(&global_dma32_write_combined[i]);
+ ttm_pool_type_fini(&global_dma32_uncached[i]);
}
unregister_shrinker(&mm_shrinker);
--
2.25.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/ttm: fix DMA32 handling in the global page pool
2020-11-17 15:53 [PATCH] drm/ttm: fix DMA32 handling in the global page pool Christian König
@ 2020-11-19 9:21 ` Christian König
2020-11-19 10:22 ` Huang Rui
0 siblings, 1 reply; 3+ messages in thread
From: Christian König @ 2020-11-19 9:21 UTC (permalink / raw)
To: dri-devel, Daniel Vetter, Zhou, David(ChunMing), Huang Rui
Ping, can I get an rb or at least Acked-by for this?
Thanks in advance,
Christian.
Am 17.11.20 um 16:53 schrieb Christian König:
> When we have mixed DMA32 and non DMA32 device in one system
> it could otherwise happen that the DMA32 device gets pages
> it can't work with.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/ttm/ttm_pool.c | 22 ++++++++++++++++++++++
> 1 file changed, 22 insertions(+)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index 1b96780b4989..5455b2044759 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -63,6 +63,9 @@ static atomic_long_t allocated_pages;
> static struct ttm_pool_type global_write_combined[MAX_ORDER];
> static struct ttm_pool_type global_uncached[MAX_ORDER];
>
> +static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
> +static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
> +
> static spinlock_t shrinker_lock;
> static struct list_head shrinker_list;
> static struct shrinker mm_shrinker;
> @@ -290,8 +293,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
> #ifdef CONFIG_X86
> switch (caching) {
> case ttm_write_combined:
> + if (pool->use_dma32)
> + return &global_dma32_write_combined[order];
> +
> return &global_write_combined[order];
> case ttm_uncached:
> + if (pool->use_dma32)
> + return &global_dma32_uncached[order];
> +
> return &global_uncached[order];
> default:
> break;
> @@ -570,6 +579,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> seq_puts(m, "uc\t:");
> ttm_pool_debugfs_orders(global_uncached, m);
>
> + seq_puts(m, "wc 32\t:");
> + ttm_pool_debugfs_orders(global_dma32_write_combined, m);
> + seq_puts(m, "uc 32\t:");
> + ttm_pool_debugfs_orders(global_dma32_uncached, m);
> +
> for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
> seq_puts(m, "DMA ");
> switch (i) {
> @@ -640,6 +654,11 @@ int ttm_pool_mgr_init(unsigned long num_pages)
> ttm_pool_type_init(&global_write_combined[i], NULL,
> ttm_write_combined, i);
> ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
> +
> + ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
> + ttm_write_combined, i);
> + ttm_pool_type_init(&global_dma32_uncached[i], NULL,
> + ttm_uncached, i);
> }
>
> mm_shrinker.count_objects = ttm_pool_shrinker_count;
> @@ -660,6 +679,9 @@ void ttm_pool_mgr_fini(void)
> for (i = 0; i < MAX_ORDER; ++i) {
> ttm_pool_type_fini(&global_write_combined[i]);
> ttm_pool_type_fini(&global_uncached[i]);
> +
> + ttm_pool_type_fini(&global_dma32_write_combined[i]);
> + ttm_pool_type_fini(&global_dma32_uncached[i]);
> }
>
> unregister_shrinker(&mm_shrinker);
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/ttm: fix DMA32 handling in the global page pool
2020-11-19 9:21 ` Christian König
@ 2020-11-19 10:22 ` Huang Rui
0 siblings, 0 replies; 3+ messages in thread
From: Huang Rui @ 2020-11-19 10:22 UTC (permalink / raw)
To: Christian König; +Cc: Zhou, David(ChunMing), dri-devel
On Thu, Nov 19, 2020 at 05:21:51PM +0800, Christian König wrote:
> Ping, can I get an rb or at least Acked-by for this?
>
> Thanks in advance,
> Christian.
>
> Am 17.11.20 um 16:53 schrieb Christian König:
> > When we have mixed DMA32 and non DMA32 device in one system
> > it could otherwise happen that the DMA32 device gets pages
> > it can't work with.
> >
> > Signed-off-by: Christian König <christian.koenig@amd.com>
Looks good to me.
Reviewed-by: Huang Rui <ray.huang@amd.com>
> > ---
> > drivers/gpu/drm/ttm/ttm_pool.c | 22 ++++++++++++++++++++++
> > 1 file changed, 22 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> > index 1b96780b4989..5455b2044759 100644
> > --- a/drivers/gpu/drm/ttm/ttm_pool.c
> > +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> > @@ -63,6 +63,9 @@ static atomic_long_t allocated_pages;
> > static struct ttm_pool_type global_write_combined[MAX_ORDER];
> > static struct ttm_pool_type global_uncached[MAX_ORDER];
> >
> > +static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
> > +static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
> > +
> > static spinlock_t shrinker_lock;
> > static struct list_head shrinker_list;
> > static struct shrinker mm_shrinker;
> > @@ -290,8 +293,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
> > #ifdef CONFIG_X86
> > switch (caching) {
> > case ttm_write_combined:
> > + if (pool->use_dma32)
> > + return &global_dma32_write_combined[order];
> > +
> > return &global_write_combined[order];
> > case ttm_uncached:
> > + if (pool->use_dma32)
> > + return &global_dma32_uncached[order];
> > +
> > return &global_uncached[order];
> > default:
> > break;
> > @@ -570,6 +579,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> > seq_puts(m, "uc\t:");
> > ttm_pool_debugfs_orders(global_uncached, m);
> >
> > + seq_puts(m, "wc 32\t:");
> > + ttm_pool_debugfs_orders(global_dma32_write_combined, m);
> > + seq_puts(m, "uc 32\t:");
> > + ttm_pool_debugfs_orders(global_dma32_uncached, m);
> > +
> > for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
> > seq_puts(m, "DMA ");
> > switch (i) {
> > @@ -640,6 +654,11 @@ int ttm_pool_mgr_init(unsigned long num_pages)
> > ttm_pool_type_init(&global_write_combined[i], NULL,
> > ttm_write_combined, i);
> > ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
> > +
> > + ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
> > + ttm_write_combined, i);
> > + ttm_pool_type_init(&global_dma32_uncached[i], NULL,
> > + ttm_uncached, i);
> > }
> >
> > mm_shrinker.count_objects = ttm_pool_shrinker_count;
> > @@ -660,6 +679,9 @@ void ttm_pool_mgr_fini(void)
> > for (i = 0; i < MAX_ORDER; ++i) {
> > ttm_pool_type_fini(&global_write_combined[i]);
> > ttm_pool_type_fini(&global_uncached[i]);
> > +
> > + ttm_pool_type_fini(&global_dma32_write_combined[i]);
> > + ttm_pool_type_fini(&global_dma32_uncached[i]);
> > }
> >
> > unregister_shrinker(&mm_shrinker);
>
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-11-19 10:23 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-17 15:53 [PATCH] drm/ttm: fix DMA32 handling in the global page pool Christian König
2020-11-19 9:21 ` Christian König
2020-11-19 10:22 ` Huang Rui
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.