linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] IB/mlx5: unify return value to ENOENT
@ 2021-09-03  8:48 Li Zhijian
  2021-09-28 17:08 ` Jason Gunthorpe
  2021-09-29  4:21 ` lizhijian
  0 siblings, 2 replies; 7+ messages in thread
From: Li Zhijian @ 2021-09-03  8:48 UTC (permalink / raw)
  To: leon, dledford, jgg, linux-rdma; +Cc: linux-kernel, Li Zhijian

Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
the errors all occur at get_prefetchable_mr().

flags = IBV_ADVISE_MR_FLAG_FLUSH:
mlx5_ib_advise_mr_prefetch()
  -> mlx5_ib_prefetch_sg_list()
      -> get_prefetchable_mr()
  return -ENOENT;

flags = 0:
mlx5_ib_advise_mr_prefetch()
  -> init_prefetch_work()
     -> get_prefetchable_mr()
  return -EINVAL;

Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
---
 drivers/infiniband/hw/mlx5/odp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..52572e7ea6f6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1828,7 +1828,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 
 	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
 		destroy_prefetch_work(work);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	queue_work(system_unbound_wq, &work->work);
 	return 0;
-- 
2.31.1




^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-09-03  8:48 [PATCH] IB/mlx5: unify return value to ENOENT Li Zhijian
@ 2021-09-28 17:08 ` Jason Gunthorpe
  2021-09-29  4:37   ` lizhijian
  2021-10-13  7:26   ` lizhijian
  2021-09-29  4:21 ` lizhijian
  1 sibling, 2 replies; 7+ messages in thread
From: Jason Gunthorpe @ 2021-09-28 17:08 UTC (permalink / raw)
  To: Li Zhijian; +Cc: leon, dledford, linux-rdma, linux-kernel

On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
> the errors all occur at get_prefetchable_mr().

What do you think about this instead?

From b739920ed4869decb02a0dbc58256e6c72ec7061 Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe <jgg@nvidia.com>
Date: Fri, 3 Sep 2021 16:48:15 +0800
Subject: [PATCH] IB/mlx5: Flow through a more detailed return code from
 get_prefetchable_mr()

The error returns for various cases detected by get_prefetchable_mr() get
confused as it flows back to userspace. Properly label each error path and
flow the error code properly back to the system call.

Suggested-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebcc3..77890a85fc2dd3 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
 
 	xa_lock(&dev->odp_mkeys);
 	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
-	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+	if (!mmkey || mmkey->key != lkey) {
+		mr = ERR_PTR(-ENOENT);
 		goto end;
+	}
+	if (mmkey->type != MLX5_MKEY_MR) {
+		mr = ERR_PTR(-EINVAL);
+		goto end;
+	}
 
 	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 
 	if (mr->ibmr.pd != pd) {
-		mr = NULL;
+		mr = ERR_PTR(-EPERM);
 		goto end;
 	}
 
 	/* prefetch with write-access must be supported by the MR */
 	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
 	    !mr->umem->writable) {
-		mr = NULL;
+		mr = ERR_PTR(-EPERM);
 		goto end;
 	}
 
@@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
 	destroy_prefetch_work(work);
 }
 
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
 			       enum ib_uverbs_advise_mr_advice advice,
 			       u32 pf_flags, struct prefetch_mr_work *work,
 			       struct ib_sge *sg_list, u32 num_sge)
@@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
 	work->pf_flags = pf_flags;
 
 	for (i = 0; i < num_sge; ++i) {
+		struct mlx5_ib_mr *mr;
+
+		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+		if (IS_ERR(mr)) {
+			work->num_sge = i;
+			return PTR_ERR(mr);
+		}
 		work->frags[i].io_virt = sg_list[i].addr;
 		work->frags[i].length = sg_list[i].length;
-		work->frags[i].mr =
-			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
-		if (!work->frags[i].mr) {
-			work->num_sge = i;
-			return false;
-		}
+		work->frags[i].mr = mr;
 	}
 	work->num_sge = num_sge;
-	return true;
+	return 0;
 }
 
 static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
 		struct mlx5_ib_mr *mr;
 
 		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
-		if (!mr)
-			return -ENOENT;
+		if (IS_ERR(mr))
+			return PTR_ERR(mr);
 		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
 				   &bytes_mapped, pf_flags);
 		if (ret < 0) {
@@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 {
 	u32 pf_flags = 0;
 	struct prefetch_mr_work *work;
+	int rc;
 
 	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
 		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 	if (!work)
 		return -ENOMEM;
 
-	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+	if (rc) {
 		destroy_prefetch_work(work);
-		return -EINVAL;
+		return rc;
 	}
 	queue_work(system_unbound_wq, &work->work);
 	return 0;
-- 
2.33.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-09-03  8:48 [PATCH] IB/mlx5: unify return value to ENOENT Li Zhijian
  2021-09-28 17:08 ` Jason Gunthorpe
@ 2021-09-29  4:21 ` lizhijian
  1 sibling, 0 replies; 7+ messages in thread
From: lizhijian @ 2021-09-29  4:21 UTC (permalink / raw)
  To: leon, dledford, jgg, linux-rdma; +Cc: linux-kernel, lizhijian

ping


On 03/09/2021 16:48, Li Zhijian wrote:
> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
> the errors all occur at get_prefetchable_mr().
>
> flags = IBV_ADVISE_MR_FLAG_FLUSH:
> mlx5_ib_advise_mr_prefetch()
>    -> mlx5_ib_prefetch_sg_list()
>        -> get_prefetchable_mr()
>    return -ENOENT;
>
> flags = 0:
> mlx5_ib_advise_mr_prefetch()
>    -> init_prefetch_work()
>       -> get_prefetchable_mr()
>    return -EINVAL;
>
> Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
> ---
>   drivers/infiniband/hw/mlx5/odp.c | 2 +-
>   1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> index d0d98e584ebc..52572e7ea6f6 100644
> --- a/drivers/infiniband/hw/mlx5/odp.c
> +++ b/drivers/infiniband/hw/mlx5/odp.c
> @@ -1828,7 +1828,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>   
>   	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
>   		destroy_prefetch_work(work);
> -		return -EINVAL;
> +		return -ENOENT;
>   	}
>   	queue_work(system_unbound_wq, &work->work);
>   	return 0;

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-09-28 17:08 ` Jason Gunthorpe
@ 2021-09-29  4:37   ` lizhijian
  2021-10-01 14:45     ` Jason Gunthorpe
  2021-10-13  7:26   ` lizhijian
  1 sibling, 1 reply; 7+ messages in thread
From: lizhijian @ 2021-09-29  4:37 UTC (permalink / raw)
  To: Jason Gunthorpe, lizhijian; +Cc: leon, dledford, linux-rdma, linux-kernel

So sorry, I missed this reply when I sent my previous ping


On 29/09/2021 01:08, Jason Gunthorpe wrote:
> On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
>> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
>> the errors all occur at get_prefetchable_mr().
> What do you think about this instead?
Thank you, it's much better.

Thanks
Zhijian




>
>  From b739920ed4869decb02a0dbc58256e6c72ec7061 Mon Sep 17 00:00:00 2001
> From: Jason Gunthorpe <jgg@nvidia.com>
> Date: Fri, 3 Sep 2021 16:48:15 +0800
> Subject: [PATCH] IB/mlx5: Flow through a more detailed return code from
>   get_prefetchable_mr()
>
> The error returns for various cases detected by get_prefetchable_mr() get
> confused as it flows back to userspace. Properly label each error path and
> flow the error code properly back to the system call.
>
> Suggested-by: Li Zhijian <lizhijian@cn.fujitsu.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
>   1 file changed, 25 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> index d0d98e584ebcc3..77890a85fc2dd3 100644
> --- a/drivers/infiniband/hw/mlx5/odp.c
> +++ b/drivers/infiniband/hw/mlx5/odp.c
> @@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
>   
>   	xa_lock(&dev->odp_mkeys);
>   	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
> -	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
> +	if (!mmkey || mmkey->key != lkey) {
> +		mr = ERR_PTR(-ENOENT);
>   		goto end;
> +	}
> +	if (mmkey->type != MLX5_MKEY_MR) {
> +		mr = ERR_PTR(-EINVAL);
> +		goto end;
> +	}
>   
>   	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
>   
>   	if (mr->ibmr.pd != pd) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);
>   		goto end;
>   	}
>   
>   	/* prefetch with write-access must be supported by the MR */
>   	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
>   	    !mr->umem->writable) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);
>   		goto end;
>   	}
>   
> @@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
>   	destroy_prefetch_work(work);
>   }
>   
> -static bool init_prefetch_work(struct ib_pd *pd,
> +static int init_prefetch_work(struct ib_pd *pd,
>   			       enum ib_uverbs_advise_mr_advice advice,
>   			       u32 pf_flags, struct prefetch_mr_work *work,
>   			       struct ib_sge *sg_list, u32 num_sge)
> @@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
>   	work->pf_flags = pf_flags;
>   
>   	for (i = 0; i < num_sge; ++i) {
> +		struct mlx5_ib_mr *mr;
> +
> +		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> +		if (IS_ERR(mr)) {
> +			work->num_sge = i;
> +			return PTR_ERR(mr);
> +		}
>   		work->frags[i].io_virt = sg_list[i].addr;
>   		work->frags[i].length = sg_list[i].length;
> -		work->frags[i].mr =
> -			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!work->frags[i].mr) {
> -			work->num_sge = i;
> -			return false;
> -		}
> +		work->frags[i].mr = mr;
>   	}
>   	work->num_sge = num_sge;
> -	return true;
> +	return 0;
>   }
>   
>   static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
> @@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
>   		struct mlx5_ib_mr *mr;
>   
>   		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!mr)
> -			return -ENOENT;
> +		if (IS_ERR(mr))
> +			return PTR_ERR(mr);
>   		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
>   				   &bytes_mapped, pf_flags);
>   		if (ret < 0) {
> @@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>   {
>   	u32 pf_flags = 0;
>   	struct prefetch_mr_work *work;
> +	int rc;
>   
>   	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
>   		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
> @@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>   	if (!work)
>   		return -ENOMEM;
>   
> -	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
> +	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
> +	if (rc) {
>   		destroy_prefetch_work(work);
> -		return -EINVAL;
> +		return rc;
>   	}
>   	queue_work(system_unbound_wq, &work->work);
>   	return 0;

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-09-29  4:37   ` lizhijian
@ 2021-10-01 14:45     ` Jason Gunthorpe
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Gunthorpe @ 2021-10-01 14:45 UTC (permalink / raw)
  To: lizhijian; +Cc: leon, dledford, linux-rdma, linux-kernel

On Wed, Sep 29, 2021 at 04:37:22AM +0000, lizhijian@fujitsu.com wrote:
> So sorry, I missed this reply at previous ping
> 
> 
> On 29/09/2021 01:08, Jason Gunthorpe wrote:
> > On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
> >> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
> >> the errors all occur at get_prefetchable_mr().
> > What do you think about this instead?
> Thank you, it's much better.

Ok, applied to for-next

Jason

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-09-28 17:08 ` Jason Gunthorpe
  2021-09-29  4:37   ` lizhijian
@ 2021-10-13  7:26   ` lizhijian
  2021-10-13 18:02     ` Jason Gunthorpe
  1 sibling, 1 reply; 7+ messages in thread
From: lizhijian @ 2021-10-13  7:26 UTC (permalink / raw)
  To: Jason Gunthorpe, lizhijian; +Cc: leon, dledford, linux-rdma, linux-kernel

Hi Jason

When updating the ibv_advise_mr man page, I have a few concerns:


On 29/09/2021 01:08, Jason Gunthorpe wrote:
> On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
>> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
>> the errors all occur at get_prefetchable_mr().
> What do you think about this instead?
>
>  From b739920ed4869decb02a0dbc58256e6c72ec7061 Mon Sep 17 00:00:00 2001
> From: Jason Gunthorpe <jgg@nvidia.com>
> Date: Fri, 3 Sep 2021 16:48:15 +0800
> Subject: [PATCH] IB/mlx5: Flow through a more detailed return code from
>   get_prefetchable_mr()
>
> The error returns for various cases detected by get_prefetchable_mr() get
> confused as it flows back to userspace. Properly label each error path and
> flow the error code properly back to the system call.
>
> Suggested-by: Li Zhijian <lizhijian@cn.fujitsu.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
>   1 file changed, 25 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> index d0d98e584ebcc3..77890a85fc2dd3 100644
> --- a/drivers/infiniband/hw/mlx5/odp.c
> +++ b/drivers/infiniband/hw/mlx5/odp.c
> @@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
>   
>   	xa_lock(&dev->odp_mkeys);
>   	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
> -	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
> +	if (!mmkey || mmkey->key != lkey) {
> +		mr = ERR_PTR(-ENOENT);
>   		goto end;
> +	}
> +	if (mmkey->type != MLX5_MKEY_MR) {
> +		mr = ERR_PTR(-EINVAL);
> +		goto end;
> +	}


Can we return EINVAL in both of the above 2 cases, so that we can simply attribute them to *lkey is invalid*?
Otherwise it's hard to describe the 2nd case in the man page, since users/developers cannot link mmkey->type
to the parameters of ibv_advise_mr().


>   
>   	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
>   
>   	if (mr->ibmr.pd != pd) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);

Is EINVAL better for compatibility? Since the man page said EINVAL in this case before.

Thanks


>   		goto end;
>   	}
>   
>   	/* prefetch with write-access must be supported by the MR */
>   	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
>   	    !mr->umem->writable) {
> -		mr = NULL;
> +		mr = ERR_PTR(-EPERM);
>   		goto end;
>   	}
>   
> @@ -1753,7 +1759,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
>   	destroy_prefetch_work(work);
>   }
>   
> -static bool init_prefetch_work(struct ib_pd *pd,
> +static int init_prefetch_work(struct ib_pd *pd,
>   			       enum ib_uverbs_advise_mr_advice advice,
>   			       u32 pf_flags, struct prefetch_mr_work *work,
>   			       struct ib_sge *sg_list, u32 num_sge)
> @@ -1764,17 +1770,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
>   	work->pf_flags = pf_flags;
>   
>   	for (i = 0; i < num_sge; ++i) {
> +		struct mlx5_ib_mr *mr;
> +
> +		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> +		if (IS_ERR(mr)) {
> +			work->num_sge = i;
> +			return PTR_ERR(mr);
> +		}
>   		work->frags[i].io_virt = sg_list[i].addr;
>   		work->frags[i].length = sg_list[i].length;
> -		work->frags[i].mr =
> -			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!work->frags[i].mr) {
> -			work->num_sge = i;
> -			return false;
> -		}
> +		work->frags[i].mr = mr;
>   	}
>   	work->num_sge = num_sge;
> -	return true;
> +	return 0;
>   }
>   
>   static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
> @@ -1790,8 +1798,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
>   		struct mlx5_ib_mr *mr;
>   
>   		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
> -		if (!mr)
> -			return -ENOENT;
> +		if (IS_ERR(mr))
> +			return PTR_ERR(mr);
>   		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
>   				   &bytes_mapped, pf_flags);
>   		if (ret < 0) {
> @@ -1811,6 +1819,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>   {
>   	u32 pf_flags = 0;
>   	struct prefetch_mr_work *work;
> +	int rc;
>   
>   	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
>   		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
> @@ -1826,9 +1835,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
>   	if (!work)
>   		return -ENOMEM;
>   
> -	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
> +	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
> +	if (rc) {
>   		destroy_prefetch_work(work);
> -		return -EINVAL;
> +		return rc;
>   	}
>   	queue_work(system_unbound_wq, &work->work);
>   	return 0;

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] IB/mlx5: unify return value to ENOENT
  2021-10-13  7:26   ` lizhijian
@ 2021-10-13 18:02     ` Jason Gunthorpe
  0 siblings, 0 replies; 7+ messages in thread
From: Jason Gunthorpe @ 2021-10-13 18:02 UTC (permalink / raw)
  To: lizhijian; +Cc: leon, dledford, linux-rdma, linux-kernel

On Wed, Oct 13, 2021 at 07:26:49AM +0000, lizhijian@fujitsu.com wrote:
> Hi Jason
> 
> When updating the ibv_advise_mr man page, I have a few concerns:
> 
> 
> On 29/09/2021 01:08, Jason Gunthorpe wrote:
> > On Fri, Sep 03, 2021 at 04:48:15PM +0800, Li Zhijian wrote:
> >> Previously, ENOENT or EINVAL will be returned by ibv_advise_mr() although
> >> the errors all occur at get_prefetchable_mr().
> > What do you think about this instead?
> >
> >  From b739920ed4869decb02a0dbc58256e6c72ec7061 Mon Sep 17 00:00:00 2001
> > From: Jason Gunthorpe <jgg@nvidia.com>
> > Date: Fri, 3 Sep 2021 16:48:15 +0800
> > Subject: [PATCH] IB/mlx5: Flow through a more detailed return code from
> >   get_prefetchable_mr()
> >
> > The error returns for various cases detected by get_prefetchable_mr() get
> > confused as it flows back to userspace. Properly label each error path and
> > flow the error code properly back to the system call.
> >
> > Suggested-by: Li Zhijian <lizhijian@cn.fujitsu.com>
> > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> >   drivers/infiniband/hw/mlx5/odp.c | 40 ++++++++++++++++++++------------
> >   1 file changed, 25 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
> > index d0d98e584ebcc3..77890a85fc2dd3 100644
> > +++ b/drivers/infiniband/hw/mlx5/odp.c
> > @@ -1708,20 +1708,26 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
> >   
> >   	xa_lock(&dev->odp_mkeys);
> >   	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
> > -	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
> > +	if (!mmkey || mmkey->key != lkey) {
> > +		mr = ERR_PTR(-ENOENT);
> >   		goto end;
> > +	}
> > +	if (mmkey->type != MLX5_MKEY_MR) {
> > +		mr = ERR_PTR(-EINVAL);
> > +		goto end;
> > +	}
> 
> 
> Can we return EINVAL in both above 2 cases so that we can attribute
> them to *lkey is invalid*  simply.  Otherwise it's hard to describe
> 2nd case by man page since users/developers cannot link mmkey->type
> to the parameters of ibv_advise_mr().

lkey is valid in the 2nd case, but points to the wrong kind of object
to prefetch, hence EINVAL. E.g. it is a MW or something.

> >   	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
> >   
> >   	if (mr->ibmr.pd != pd) {
> > -		mr = NULL;
> > +		mr = ERR_PTR(-EPERM);
> 
> EINVAL is better  for compatible ? since man page said EINVAL in this case before.

Referencing a valid lkey outside the caller's security scope should be
EPERM.

Jason

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2021-10-13 18:02 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-03  8:48 [PATCH] IB/mlx5: unify return value to ENOENT Li Zhijian
2021-09-28 17:08 ` Jason Gunthorpe
2021-09-29  4:37   ` lizhijian
2021-10-01 14:45     ` Jason Gunthorpe
2021-10-13  7:26   ` lizhijian
2021-10-13 18:02     ` Jason Gunthorpe
2021-09-29  4:21 ` lizhijian

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).