From: Joonyoung Shim
Subject: [PATCH 3/3] drm/cma: remove GEM CMA specific dma_buf functionality
Date: Wed, 12 Jun 2013 22:16:19 +0900
Message-ID: <1371042979-14633-3-git-send-email-jy0922.shim@samsung.com>
In-reply-to: <1371042979-14633-1-git-send-email-jy0922.shim@samsung.com>
References: <1371042979-14633-1-git-send-email-jy0922.shim@samsung.com>
To: dri-devel@lists.freedesktop.org
Cc: laurent.pinchart@ideasonboard.com

We can use prime helpers instead.

Signed-off-by: Joonyoung Shim
---
 drivers/gpu/drm/drm_gem_cma_helper.c | 283 -----------------------------------
 include/drm/drm_gem_cma_helper.h     |   6 -
 2 files changed, 289 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 41a4635..076b045 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -317,289 +317,6 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
 
-/* -----------------------------------------------------------------------------
- * DMA-BUF
- */
-
-struct drm_gem_cma_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-};
-
-static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
-				     struct dma_buf_attachment *attach)
-{
-	struct drm_gem_cma_dmabuf_attachment *cma_attach;
-
-	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
-	if (!cma_attach)
-		return -ENOMEM;
-
-	cma_attach->dir = DMA_NONE;
-	attach->priv = cma_attach;
-
-	return 0;
-}
-
-static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
-				      struct dma_buf_attachment *attach)
-{
-	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (cma_attach == NULL)
-		return;
-
-	sgt = &cma_attach->sgt;
-
-	if (cma_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-			     cma_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(cma_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *
-drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
-		       enum dma_data_direction dir)
-{
-	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
-	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
-	struct drm_device *drm = cma_obj->base.dev;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt;
-	unsigned int i;
-	int nents, ret;
-
-	DRM_DEBUG_PRIME("\n");
-
-	if (WARN_ON(dir == DMA_NONE))
-		return ERR_PTR(-EINVAL);
-
-	/* Return the cached mapping when possible. */
-	if (cma_attach->dir == dir)
-		return &cma_attach->sgt;
-
-	/* Two mappings with different directions for the same attachment are
-	 * not allowed.
-	 */
-	if (WARN_ON(cma_attach->dir != DMA_NONE))
-		return ERR_PTR(-EBUSY);
-
-	sgt = &cma_attach->sgt;
-
-	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&drm->struct_mutex);
-
-	rd = cma_obj->sgt->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-	if (!nents) {
-		DRM_ERROR("failed to map sgl with iommu.\n");
-		sg_free_table(sgt);
-		sgt = ERR_PTR(-EIO);
-		goto done;
-	}
-
-	cma_attach->dir = dir;
-	attach->priv = cma_attach;
-
-	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
-
-done:
-	mutex_unlock(&drm->struct_mutex);
-	return sgt;
-}
-
-static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
-				     struct sg_table *sgt,
-				     enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-}
-
-static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
-{
-	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-
-	DRM_DEBUG_PRIME("%s\n", __FILE__);
-
-	/*
-	 * drm_gem_cma_dmabuf_release() call means that file object's
-	 * f_count is 0 and it calls drm_gem_object_handle_unreference()
-	 * to drop the references that these values had been increased
-	 * at drm_prime_handle_to_fd()
-	 */
-	if (cma_obj->base.export_dma_buf == dmabuf) {
-		cma_obj->base.export_dma_buf = NULL;
-
-		/*
-		 * drop this gem object refcount to release allocated buffer
-		 * and resources.
-		 */
-		drm_gem_object_unreference_unlocked(&cma_obj->base);
-	}
-}
-
-static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
-					    unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
-					     unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
-				     unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
-				      unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
-				   struct vm_area_struct *vma)
-{
-	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-	struct drm_gem_object *gem_obj = &cma_obj->base;
-	int ret;
-
-	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	return drm_gem_cma_mmap_obj(cma_obj, vma);
-}
-
-static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
-{
-	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
-
-	return cma_obj->vaddr;
-}
-
-static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
-	.attach = drm_gem_cma_dmabuf_attach,
-	.detach = drm_gem_cma_dmabuf_detach,
-	.map_dma_buf = drm_gem_cma_dmabuf_map,
-	.unmap_dma_buf = drm_gem_cma_dmabuf_unmap,
-	.kmap = drm_gem_cma_dmabuf_kmap,
-	.kmap_atomic = drm_gem_cma_dmabuf_kmap_atomic,
-	.kunmap = drm_gem_cma_dmabuf_kunmap,
-	.kunmap_atomic = drm_gem_cma_dmabuf_kunmap_atomic,
-	.mmap = drm_gem_cma_dmabuf_mmap,
-	.vmap = drm_gem_cma_dmabuf_vmap,
-	.release = drm_gem_cma_dmabuf_release,
-};
-
-struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
-					  struct drm_gem_object *obj, int flags)
-{
-	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-
-	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
-			      cma_obj->base.size, flags);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
-
-struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
-						 struct dma_buf *dma_buf)
-{
-	struct drm_gem_cma_object *cma_obj;
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	int ret;
-
-	DRM_DEBUG_PRIME("%s\n", __FILE__);
-
-	/* is this one of own objects? */
-	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
-		struct drm_gem_object *obj;
-
-		cma_obj = dma_buf->priv;
-		obj = &cma_obj->base;
-
-		/* is it from our device? */
-		if (obj->dev == drm) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_reference(obj);
-			dma_buf_put(dma_buf);
-			return obj;
-		}
-	}
-
-	/* Create a CMA GEM buffer. */
-	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
-	if (IS_ERR(cma_obj))
-		return ERR_PTR(PTR_ERR(cma_obj));
-
-	/* Attach to the buffer and map it. Make sure the mapping is contiguous
-	 * on the device memory bus, as that's all we support.
-	 */
-	attach = dma_buf_attach(dma_buf, drm->dev);
-	if (IS_ERR(attach)) {
-		ret = -EINVAL;
-		goto error_gem_free;
-	}
-
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR_OR_NULL(sgt)) {
-		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
-		goto error_buf_detach;
-	}
-
-	if (sgt->nents != 1) {
-		ret = -EINVAL;
-		goto error_buf_unmap;
-	}
-
-	cma_obj->base.import_attach = attach;
-	cma_obj->paddr = sg_dma_address(sgt->sgl);
-	cma_obj->sgt = sgt;
-
-	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
-			dma_buf->size);
-
-	return &cma_obj->base;
-
-error_buf_unmap:
-	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-error_buf_detach:
-	dma_buf_detach(dma_buf, attach);
-error_gem_free:
-	drm_gem_cma_free_object(&cma_obj->base);
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
-
 /* low-level interface prime helpers */
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
 {
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 9d39d2a..c34f27f 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -48,12 +48,6 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
 
-struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
-					  struct drm_gem_object *obj,
-					  int flags);
-struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
-						 struct dma_buf *dma_buf);
-
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
 drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
-- 
1.8.1.2
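
For context, with drm_gem_cma_dmabuf_export()/import() gone, a CMA-based driver gets the same functionality from the generic PRIME helpers in drm_prime.c plus the low-level CMA hooks kept above (drm_gem_cma_prime_get_sg_table() and drm_gem_cma_prime_import_sg_table()). The sketch below is illustrative only: the "foo" driver is hypothetical, and the vmap/vunmap/mmap hooks assume the CMA prime helpers introduced earlier in this series.

/*
 * Minimal sketch of the PRIME wiring in a hypothetical CMA-based driver.
 * The prime_* and gem_prime_export/import entries are the generic helpers
 * from drm_prime.c; the remaining gem_prime_* hooks are the CMA low-level
 * helpers (vmap/vunmap/mmap assumed from the earlier patches in this series).
 */
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver foo_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	/* ... fops, ioctls and mode-setting hooks elided ... */

	/* Generic PRIME plumbing replaces the removed CMA dma_buf code. */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,

	/* CMA-specific low-level hooks used by the generic helpers. */
	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
};

With hooks like these in place, the generic export/import paths take care of attach/detach, scatter-gather mapping and the reimport-of-our-own-buffer case that the removed code open-coded per helper.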