From: Andrzej Hajda <andrzej.hajda@intel.com>
To: Nirmoy Das <nirmoy.das@intel.com>, intel-gfx@lists.freedesktop.org
Cc: Jani Nikula <jani.nikula@intel.com>,
Matthew Auld <matthew.auld@intel.com>,
dri-devel@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH v2: 1/3] drm/i915: Add a function to mmap framebuffer obj
Date: Mon, 20 Mar 2023 15:02:57 +0100 [thread overview]
Message-ID: <c1b797a5-92d6-6590-9c47-6606ec409a12@intel.com> (raw)
In-Reply-To: <20230320100903.23588-1-nirmoy.das@intel.com>
On 20.03.2023 11:09, Nirmoy Das wrote:
> Implement i915_gem_fb_mmap() to enable fb_ops.fb_mmap()
> callback for i915's framebuffer objects.
>
> v2: add a comment why i915_gem_object_get() is needed (Andi).
>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Andi Shyti <andi.shyti@linux.intel.com>
> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Cc: Jani Nikula <jani.nikula@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>
> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
> Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Regards
Andrzej
> ---
> drivers/gpu/drm/i915/gem/i915_gem_mman.c | 127 +++++++++++++++--------
> drivers/gpu/drm/i915/gem/i915_gem_mman.h | 2 +-
> 2 files changed, 83 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index d3c1dee16af2..341e952d3510 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -927,53 +927,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
> return file;
> }
>
> -/*
> - * This overcomes the limitation in drm_gem_mmap's assignment of a
> - * drm_gem_object as the vma->vm_private_data. Since we need to
> - * be able to resolve multiple mmap offsets which could be tied
> - * to a single gem object.
> - */
> -int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +static int
> +i915_gem_object_mmap(struct drm_i915_gem_object *obj,
> + struct i915_mmap_offset *mmo,
> + struct vm_area_struct *vma)
> {
> - struct drm_vma_offset_node *node;
> - struct drm_file *priv = filp->private_data;
> - struct drm_device *dev = priv->minor->dev;
> - struct drm_i915_gem_object *obj = NULL;
> - struct i915_mmap_offset *mmo = NULL;
> + struct drm_i915_private *i915 = to_i915(obj->base.dev);
> + struct drm_device *dev = &i915->drm;
> struct file *anon;
>
> - if (drm_dev_is_unplugged(dev))
> - return -ENODEV;
> -
> - rcu_read_lock();
> - drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> - node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> - vma->vm_pgoff,
> - vma_pages(vma));
> - if (node && drm_vma_node_is_allowed(node, priv)) {
> - /*
> - * Skip 0-refcnted objects as it is in the process of being
> - * destroyed and will be invalid when the vma manager lock
> - * is released.
> - */
> - if (!node->driver_private) {
> - mmo = container_of(node, struct i915_mmap_offset, vma_node);
> - obj = i915_gem_object_get_rcu(mmo->obj);
> -
> - GEM_BUG_ON(obj && obj->ops->mmap_ops);
> - } else {
> - obj = i915_gem_object_get_rcu
> - (container_of(node, struct drm_i915_gem_object,
> - base.vma_node));
> -
> - GEM_BUG_ON(obj && !obj->ops->mmap_ops);
> - }
> - }
> - drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
> - rcu_read_unlock();
> - if (!obj)
> - return node ? -EACCES : -EINVAL;
> -
> if (i915_gem_object_is_readonly(obj)) {
> if (vma->vm_flags & VM_WRITE) {
> i915_gem_object_put(obj);
> @@ -1005,7 +967,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> if (obj->ops->mmap_ops) {
> vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
> vma->vm_ops = obj->ops->mmap_ops;
> - vma->vm_private_data = node->driver_private;
> + vma->vm_private_data = obj->base.vma_node.driver_private;
> return 0;
> }
>
> @@ -1043,6 +1005,81 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> return 0;
> }
>
> +/*
> + * This overcomes the limitation in drm_gem_mmap's assignment of a
> + * drm_gem_object as the vma->vm_private_data. Since we need to
> + * be able to resolve multiple mmap offsets which could be tied
> + * to a single gem object.
> + */
> +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> + struct drm_vma_offset_node *node;
> + struct drm_file *priv = filp->private_data;
> + struct drm_device *dev = priv->minor->dev;
> + struct drm_i915_gem_object *obj = NULL;
> + struct i915_mmap_offset *mmo = NULL;
> +
> + if (drm_dev_is_unplugged(dev))
> + return -ENODEV;
> +
> + rcu_read_lock();
> + drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> + node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> + vma->vm_pgoff,
> + vma_pages(vma));
> + if (node && drm_vma_node_is_allowed(node, priv)) {
> + /*
> + * Skip 0-refcnted objects as it is in the process of being
> + * destroyed and will be invalid when the vma manager lock
> + * is released.
> + */
> + if (!node->driver_private) {
> + mmo = container_of(node, struct i915_mmap_offset, vma_node);
> + obj = i915_gem_object_get_rcu(mmo->obj);
> +
> + GEM_BUG_ON(obj && obj->ops->mmap_ops);
> + } else {
> + obj = i915_gem_object_get_rcu
> + (container_of(node, struct drm_i915_gem_object,
> + base.vma_node));
> +
> + GEM_BUG_ON(obj && !obj->ops->mmap_ops);
> + }
> + }
> + drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
> + rcu_read_unlock();
> + if (!obj)
> + return node ? -EACCES : -EINVAL;
> +
> + return i915_gem_object_mmap(obj, mmo, vma);
> +}
> +
> +int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
> +{
> + struct drm_i915_private *i915 = to_i915(obj->base.dev);
> + struct drm_device *dev = &i915->drm;
> + struct i915_mmap_offset *mmo = NULL;
> + enum i915_mmap_type mmap_type;
> + struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
> +
> + if (drm_dev_is_unplugged(dev))
> + return -ENODEV;
> +
> + mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
> + mmo = mmap_offset_attach(obj, mmap_type, NULL);
> + if (!mmo)
> + return -ENODEV;
> +
> + /*
> + * When we install vm_ops for mmap we are too late for
> + * the vm_ops->open() which increases the ref_count of
> + * this obj and then it gets decreased by the vm_ops->close().
> + * To balance this increase the obj ref_count here.
> + */
> + obj = i915_gem_object_get(mmo->obj);
> + return i915_gem_object_mmap(obj, mmo, vma);
> +}
> +
> #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> #include "selftests/i915_gem_mman.c"
> #endif
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
> index 1fa91b3033b3..196417fd0f5c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
> @@ -29,5 +29,5 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
>
> void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
> void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
> -
> +int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma);
> #endif
next prev parent reply other threads:[~2023-03-20 14:08 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-20 10:09 [PATCH v2: 1/3] drm/i915: Add a function to mmap framebuffer obj Nirmoy Das
2023-03-20 10:09 ` [PATCH 2/3] drm/i915/display: Add helper func to get intel_fbdev from drm_fb_helper Nirmoy Das
2023-03-20 10:39 ` Jani Nikula
2023-03-20 10:49 ` Andi Shyti
2023-03-20 13:58 ` [Intel-gfx] " Andrzej Hajda
2023-03-20 10:09 ` [PATCH RFC 3/3] drm/i915/display: Implement fb_mmap callback function Nirmoy Das
2023-03-20 14:00 ` [Intel-gfx] " Andrzej Hajda
2023-03-20 14:02 ` Andrzej Hajda [this message]
2023-03-23 8:00 ` [Intel-gfx] [PATCH v2: 1/3] drm/i915: Add a function to mmap framebuffer obj Das, Nirmoy
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=c1b797a5-92d6-6590-9c47-6606ec409a12@intel.com \
--to=andrzej.hajda@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-gfx@lists.freedesktop.org \
--cc=jani.nikula@intel.com \
--cc=matthew.auld@intel.com \
--cc=nirmoy.das@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).