* [PATCH v3 for-next] RDMA/hns: Add a new mmap implementation
@ 2021-10-27 10:41 Wenpeng Liang
From: Wenpeng Liang @ 2021-10-27 10:41 UTC (permalink / raw)
  To: dledford, jgg; +Cc: linux-rdma, linuxarm, liangwenpeng

From: Chengchang Tang <tangchengchang@huawei.com>

Add a new mmap implementation based on the new mmap entry API
(rdma_user_mmap_entry_*): the doorbell and TPTR mappings are registered
as entries when the ucontext is allocated, looked up by page offset in
the mmap handler, and freed through the mmap_free hook, instead of
hard-coding vm_pgoff values in hns_roce_mmap().

Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  23 ++++
 drivers/infiniband/hw/hns/hns_roce_main.c   | 139 ++++++++++++++++----
 include/rdma/ib_verbs.h                     |   8 ++
 3 files changed, 144 insertions(+), 26 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index e5dadcd118ac..b624799624dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -225,11 +225,24 @@ struct hns_roce_uar {
 	unsigned long	logic_idx;
 };
 
+enum hns_roce_mmap_type {
+	HNS_ROCE_MMAP_TYPE_DB = 1,
+	HNS_ROCE_MMAP_TYPE_TPTR,
+};
+
+struct hns_user_mmap_entry {
+	struct rdma_user_mmap_entry rdma_entry;
+	enum hns_roce_mmap_type mmap_type;
+	u64 address;
+};
+
 struct hns_roce_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct hns_roce_uar	uar;
 	struct list_head	page_list;
 	struct mutex		page_mutex;
+	struct hns_user_mmap_entry *db_mmap_entry;
+	struct hns_user_mmap_entry *tptr_mmap_entry;
 };
 
 struct hns_roce_pd {
@@ -1050,6 +1063,12 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
 	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
 }
 
+static inline struct hns_user_mmap_entry *to_hns_mmap(
+					struct rdma_user_mmap_entry *rdma_entry)
+{
+	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
 static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
 {
 	writeq(*(u64 *)val, dest);
@@ -1260,4 +1279,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
 			       struct ib_cq *ib_cq);
+struct hns_user_mmap_entry *hns_roce_user_mmap_entry_insert(
+				struct ib_ucontext *ucontext,
+				u64 address, size_t length,
+				enum hns_roce_mmap_type mmap_type);
 #endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b3595b6079b5..8630377ab99c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -292,6 +292,76 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
 	return 0;
 }
 
+struct hns_user_mmap_entry *hns_roce_user_mmap_entry_insert(
+				struct ib_ucontext *ucontext,
+				u64 address, size_t length,
+				enum hns_roce_mmap_type mmap_type)
+{
+	struct hns_user_mmap_entry *entry;
+	int ret;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	entry->address = address;
+	entry->mmap_type = mmap_type;
+
+	ret = rdma_user_mmap_entry_insert_exact(
+			ucontext, &entry->rdma_entry, length,
+			mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+	if (ret) {
+		kfree(entry);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+	if (context->db_mmap_entry)
+		rdma_user_mmap_entry_remove(
+				&context->db_mmap_entry->rdma_entry);
+
+	if (context->tptr_mmap_entry)
+		rdma_user_mmap_entry_remove(
+				&context->tptr_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+	u64 address;
+	int ret;
+
+	address = context->uar.pfn << PAGE_SHIFT;
+	context->db_mmap_entry =
+		hns_roce_user_mmap_entry_insert(uctx, address, PAGE_SIZE,
+						HNS_ROCE_MMAP_TYPE_DB);
+	if (!context->db_mmap_entry)
+		return -ENOMEM;
+
+	if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+		return 0;
+
+	context->tptr_mmap_entry =
+		hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
+						hr_dev->tptr_size,
+						HNS_ROCE_MMAP_TYPE_TPTR);
+	if (!context->tptr_mmap_entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	hns_roce_dealloc_uar_entry(context);
+	return ret;
+}
+
 static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 				   struct ib_udata *udata)
 {
@@ -310,6 +380,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	if (ret)
 		goto error_fail_uar_alloc;
 
+	ret = hns_roce_alloc_uar_entry(uctx);
+	if (ret)
+		goto error_fail_uar_entry;
+
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
 		INIT_LIST_HEAD(&context->page_list);
@@ -326,6 +400,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
 	return 0;
 
 error_fail_copy_to_udata:
+	hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
 	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 
 error_fail_uar_alloc:
@@ -337,39 +414,48 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
+	hns_roce_dealloc_uar_entry(context);
+
 	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
 
-static int hns_roce_mmap(struct ib_ucontext *context,
-			 struct vm_area_struct *vma)
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
-	switch (vma->vm_pgoff) {
-	case 0:
-		return rdma_user_mmap_io(context, vma,
-					 to_hr_ucontext(context)->uar.pfn,
-					 PAGE_SIZE,
-					 pgprot_noncached(vma->vm_page_prot),
-					 NULL);
-
-	/* vm_pgoff: 1 -- TPTR */
-	case 1:
-		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
-			return -EINVAL;
-		/*
-		 * FIXME: using io_remap_pfn_range on the dma address returned
-		 * by dma_alloc_coherent is totally wrong.
-		 */
-		return rdma_user_mmap_io(context, vma,
-					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
-					 hr_dev->tptr_size,
-					 vma->vm_page_prot,
-					 NULL);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct rdma_user_mmap_entry *rdma_entry;
+	struct hns_user_mmap_entry *entry;
+	phys_addr_t pfn;
+	pgprot_t prot;
+	int ret;
 
-	default:
+	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+	if (!rdma_entry) {
+		ibdev_err(ibdev, "Invalid entry vm_pgoff %lu.\n",
+			  vma->vm_pgoff);
 		return -EINVAL;
 	}
+
+	entry = to_hns_mmap(rdma_entry);
+	pfn = entry->address >> PAGE_SHIFT;
+	prot = vma->vm_page_prot;
+
+	if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
+		prot = pgprot_noncached(prot);
+
+	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+				prot, rdma_entry);
+
+	rdma_user_mmap_entry_put(rdma_entry);
+
+	return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+	struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+	kfree(entry);
 }
 
 static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
@@ -445,6 +531,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.get_link_layer = hns_roce_get_link_layer,
 	.get_port_immutable = hns_roce_port_immutable,
 	.mmap = hns_roce_mmap,
+	.mmap_free = hns_roce_free_mmap,
 	.modify_device = hns_roce_modify_device,
 	.modify_qp = hns_roce_modify_qp,
 	.query_ah = hns_roce_query_ah,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 61c73adccbbd..301689d31438 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2914,6 +2914,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
 				      size_t length, u32 min_pgoff,
 				      u32 max_pgoff);
 
+static inline int rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
+				      struct rdma_user_mmap_entry *entry,
+				      size_t length, u32 pgoff)
+{
+	return rdma_user_mmap_entry_insert_range(ucontext, entry, length,
+						 pgoff, pgoff);
+}
+
 struct rdma_user_mmap_entry *
 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
 			       unsigned long pgoff);
-- 
2.33.0
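
One ABI detail worth noting: rdma_user_mmap_entry_insert_exact() pins
the new entries at the pre-existing page offsets (0 for the doorbell,
1 for TPTR), so existing userspace keeps working unchanged. Below is a
minimal userspace-side sketch of how a provider consumes those offsets;
the helper name and the cmd_fd parameter (an opened uverbs device fd)
are illustrative, not the actual rdma-core hns provider code:

#include <sys/mman.h>
#include <unistd.h>

#define HNS_DB_PGOFF	0UL	/* doorbell (UAR) page */
#define HNS_TPTR_PGOFF	1UL	/* TPTR page(s), when the device exposes them */

static void *hns_map_uar(int cmd_fd)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/*
	 * Page offset 0 resolves to the DB entry created in
	 * hns_roce_alloc_uar_entry(); the kernel validates the offset
	 * via rdma_user_mmap_entry_get_pgoff() before remapping.
	 */
	void *uar = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, cmd_fd, HNS_DB_PGOFF * page_size);

	return uar == MAP_FAILED ? NULL : uar;
}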



* Re: [PATCH v3 for-next] RDMA/hns: Add a new mmap implementation
From: Jason Gunthorpe @ 2021-10-27 12:09 UTC (permalink / raw)
  To: Wenpeng Liang; +Cc: dledford, linux-rdma, linuxarm

On Wed, Oct 27, 2021 at 06:41:29PM +0800, Wenpeng Liang wrote:
> +static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
> +{
> +	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
> +	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
> +	u64 address;
> +	int ret;
> +
> +	address = context->uar.pfn << PAGE_SHIFT;
> +	context->db_mmap_entry =
> +		hns_roce_user_mmap_entry_insert(uctx, address, PAGE_SIZE,
> +						HNS_ROCE_MMAP_TYPE_DB);
> +	if (!context->db_mmap_entry)
> +		return -ENOMEM;
> +
> +	if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
> +		return 0;

You can move the FIXME comment below to here

> +	context->tptr_mmap_entry =
> +		hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
> +						hr_dev->tptr_size,
> +						HNS_ROCE_MMAP_TYPE_TPTR);
> +	if (!context->tptr_mmap_entry) {
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	hns_roce_dealloc_uar_entry(context);
> +	return ret;
> +}

> +	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
> +	if (!rdma_entry) {
> +		ibdev_err(ibdev, "Invalid entry vm_pgoff %lu.\n",
> +			  vma->vm_pgoff);

Do not print on user-controlled paths

Jason
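
Concretely, the first comment implies that the FIXME this patch deletes
from the old vm_pgoff == 1 branch would move just above the TPTR entry
insertion in hns_roce_alloc_uar_entry(). A sketch of the resulting v4
shape (illustrative; the merged code may differ):

	if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
		return 0;

	/*
	 * FIXME: using io_remap_pfn_range on the dma address returned
	 * by dma_alloc_coherent is totally wrong.
	 */
	context->tptr_mmap_entry =
		hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
						hr_dev->tptr_size,
						HNS_ROCE_MMAP_TYPE_TPTR);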


* Re: [PATCH v3 for-next] RDMA/hns: Add a new mmap implementation
From: Wenpeng Liang @ 2021-10-28  8:45 UTC (permalink / raw)
  To: Jason Gunthorpe; +Cc: dledford, linux-rdma, linuxarm

On 2021/10/27 20:09, Jason Gunthorpe wrote:
> On Wed, Oct 27, 2021 at 06:41:29PM +0800, Wenpeng Liang wrote:
>> +static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
>> +{
>> +	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
>> +	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
>> +	u64 address;
>> +	int ret;
>> +
>> +	address = context->uar.pfn << PAGE_SHIFT;
>> +	context->db_mmap_entry =
>> +		hns_roce_user_mmap_entry_insert(uctx, address, PAGE_SIZE,
>> +						HNS_ROCE_MMAP_TYPE_DB);
>> +	if (!context->db_mmap_entry)
>> +		return -ENOMEM;
>> +
>> +	if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
>> +		return 0;
> 
> You can move the FIXME comment below to here
> 

Thanks, I will move it in v4.

>> +	context->tptr_mmap_entry =
>> +		hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
>> +						hr_dev->tptr_size,
>> +						HNS_ROCE_MMAP_TYPE_TPTR);
>> +	if (!context->tptr_mmap_entry) {
>> +		ret = -ENOMEM;
>> +		goto err;
>> +	}
>> +
>> +	return 0;
>> +
>> +err:
>> +	hns_roce_dealloc_uar_entry(context);
>> +	return ret;
>> +}
> 
>> +	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
>> +	if (!rdma_entry) {
>> +		ibdev_err(ibdev, "Invalid entry vm_pgoff %lu.\n",
>> +			  vma->vm_pgoff);
> 
> Do not print on user-controlled paths
> 
> Jason

Thanks, I will remove it in v4.

Wenpeng
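
Both review points together imply a v4 lookup path that rejects a bad
offset without logging, since vm_pgoff is user-controlled. A sketch of
the resulting handler, identical to the patch above except for the
dropped ibdev_err() (illustrative; the merged v4 may differ):

static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct hns_user_mmap_entry *entry;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	/*
	 * vm_pgoff comes straight from userspace, so a failed lookup is
	 * not worth logging; just reject the mapping.
	 */
	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_hns_mmap(rdma_entry);
	pfn = entry->address >> PAGE_SHIFT;
	prot = vma->vm_page_prot;

	if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
		prot = pgprot_noncached(prot);

	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
				prot, rdma_entry);

	rdma_user_mmap_entry_put(rdma_entry);

	return ret;
}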

