* [PATCH 1/1] nvmet: fix freeing unallocated p2pmem
@ 2021-06-01 16:22 Max Gurtovoy
  2021-06-01 17:09 ` Logan Gunthorpe
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Max Gurtovoy @ 2021-06-01 16:22 UTC (permalink / raw)
  To: linux-nvme, sagi, kbusch, hch, chaitanya.kulkarni
  Cc: oren, israelr, logang, Max Gurtovoy

In case a p2p device was found but the p2p pool is empty, the nvme target
still tries to free the sgl from the p2p pool instead of the regular sgl
pool, causing a crash (BUG() is called). Instead, assign the p2p_dev for
the request only if the sgl was allocated from the p2p pool.
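
For reference, a trimmed sketch of the free side that trips the BUG()
(the helper name is made up for illustration, metadata sgl handling
omitted): the pool to return the sgl to is chosen purely from
req->p2p_dev, so a p2p_dev left set after a failed p2p allocation hands
a regularly allocated sgl to the p2p gen_pool:

static void nvmet_req_free_data_sgl_sketch(struct nvmet_req *req)
{
	if (req->p2p_dev)
		/* assumes req->sg came from this device's p2p pool */
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
	else
		/* req->sg came from the regular sgl_alloc() path */
		sgl_free(req->sg);
}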

This is the crash that was caused:

[Sun May 30 19:13:53 2021] ------------[ cut here ]------------
[Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
[Sun May 30 19:13:53 2021] invalid opcode: 0000 [#1] SMP PTI
...
[Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
...
[Sun May 30 19:13:53 2021] RIP: 0010:gen_pool_free_owner+0xa8/0xb0
...
[Sun May 30 19:13:53 2021] Call Trace:
[Sun May 30 19:13:53 2021] ------------[ cut here ]------------
[Sun May 30 19:13:53 2021]  pci_free_p2pmem+0x2b/0x70
[Sun May 30 19:13:53 2021]  pci_p2pmem_free_sgl+0x4f/0x80
[Sun May 30 19:13:53 2021]  nvmet_req_free_sgls+0x1e/0x80 [nvmet]
[Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
[Sun May 30 19:13:53 2021]  nvmet_rdma_release_rsp+0x4e/0x1f0 [nvmet_rdma]
[Sun May 30 19:13:53 2021]  nvmet_rdma_send_done+0x1c/0x60 [nvmet_rdma]

Fixes: c6e3f1339812 ("nvmet: add metadata support for block devices")
Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/nvme/target/core.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 4b29a5bac896..8afbb989b828 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
 	return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
 {
-	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
 			nvmet_data_transfer_len(req));
 	if (!req->sg)
 		goto out_err;
 
 	if (req->metadata_len) {
-		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
 				&req->metadata_sg_cnt, req->metadata_len);
 		if (!req->metadata_sg)
 			goto out_free_sg;
 	}
+
+	req->p2p_dev = p2p_dev;
+
 	return 0;
 out_free_sg:
 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1025,25 +1029,25 @@ static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
 	return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-		return false;
+	struct pci_dev *p2p_dev = NULL;
 
-	if (req->sq->ctrl && req->sq->qid && req->ns) {
-		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-						 req->ns->nsid);
-		if (req->p2p_dev)
-			return true;
-	}
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
+		goto out;
 
-	req->p2p_dev = NULL;
-	return false;
+	if (req->sq->ctrl && req->sq->qid && req->ns)
+		p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+					    req->ns->nsid);
+out:
+	return p2p_dev;
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
 		return 0;
 
 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1072,6 +1076,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
 		if (req->metadata_sg)
 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
 	} else {
 		sgl_free(req->sg);
 		if (req->metadata_sg)
-- 
2.18.1



* Re: [PATCH 1/1] nvmet: fix freeing unallocated p2pmem
  2021-06-01 16:22 [PATCH 1/1] nvmet: fix freeing unallocated p2pmem Max Gurtovoy
@ 2021-06-01 17:09 ` Logan Gunthorpe
  2021-06-01 18:25 ` Chaitanya Kulkarni
  2021-06-02  7:11 ` Christoph Hellwig
  2 siblings, 0 replies; 4+ messages in thread
From: Logan Gunthorpe @ 2021-06-01 17:09 UTC (permalink / raw)
  To: Max Gurtovoy, linux-nvme, sagi, kbusch, hch, chaitanya.kulkarni
  Cc: oren, israelr



On 2021-06-01 10:22 a.m., Max Gurtovoy wrote:
> In case a p2p device was found but the p2p pool is empty, the nvme target
> still tries to free the sgl from the p2p pool instead of the regular sgl
> pool, causing a crash (BUG() is called). Instead, assign the p2p_dev for
> the request only if the sgl was allocated from the p2p pool.
> 
> This is the crash that was caused:
> 
> [Sun May 30 19:13:53 2021] ------------[ cut here ]------------
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> [Sun May 30 19:13:53 2021] invalid opcode: 0000 [#1] SMP PTI
> ...
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> ...
> [Sun May 30 19:13:53 2021] RIP: 0010:gen_pool_free_owner+0xa8/0xb0
> ...
> [Sun May 30 19:13:53 2021] Call Trace:
> [Sun May 30 19:13:53 2021] ------------[ cut here ]------------
> [Sun May 30 19:13:53 2021]  pci_free_p2pmem+0x2b/0x70
> [Sun May 30 19:13:53 2021]  pci_p2pmem_free_sgl+0x4f/0x80
> [Sun May 30 19:13:53 2021]  nvmet_req_free_sgls+0x1e/0x80 [nvmet]
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> [Sun May 30 19:13:53 2021]  nvmet_rdma_release_rsp+0x4e/0x1f0 [nvmet_rdma]
> [Sun May 30 19:13:53 2021]  nvmet_rdma_send_done+0x1c/0x60 [nvmet_rdma]
> 
> Fixes: c6e3f1339812 ("nvmet: add metadata support for block devices")
> Reviewed-by: Israel Rukshin <israelr@nvidia.com>
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>

This makes sense to me save one small nit below. Thanks!

Reviewed-by: Logan Gunthorpe <logang@deltatee.com>



> -static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
> +static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
>  {
> -	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
> -		return false;
> +	struct pci_dev *p2p_dev = NULL;
>  
> -	if (req->sq->ctrl && req->sq->qid && req->ns) {
> -		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
> -						 req->ns->nsid);
> -		if (req->p2p_dev)
> -			return true;
> -	}
> +	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
> +		goto out;

Seems like we could just return NULL here and save the label and goto.
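E.g. something like this (untested, just to show the shape):

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
		return NULL;

	/* p2p is only used for I/O queues on namespaces mapped to a p2p device */
	if (req->sq->ctrl && req->sq->qid && req->ns)
		return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
					 req->ns->nsid);

	return NULL;
}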

> -	req->p2p_dev = NULL;
> -	return false;
> +	if (req->sq->ctrl && req->sq->qid && req->ns)
> +		p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
> +					    req->ns->nsid);
> +out:
> +	return p2p_dev;
>  }
>  


* Re: [PATCH 1/1] nvmet: fix freeing unallocated p2pmem
  2021-06-01 16:22 [PATCH 1/1] nvmet: fix freeing unallocated p2pmem Max Gurtovoy
  2021-06-01 17:09 ` Logan Gunthorpe
@ 2021-06-01 18:25 ` Chaitanya Kulkarni
  2021-06-02  7:11 ` Christoph Hellwig
  2 siblings, 0 replies; 4+ messages in thread
From: Chaitanya Kulkarni @ 2021-06-01 18:25 UTC (permalink / raw)
  To: Max Gurtovoy, linux-nvme, sagi, kbusch, hch; +Cc: oren, israelr, logang

On 6/1/21 09:22, Max Gurtovoy wrote:
> In case a p2p device was found but the p2p pool is empty, the nvme target
> still tries to free the sgl from the p2p pool instead of the regular sgl
> pool, causing a crash (BUG() is called). Instead, assign the p2p_dev for
> the request only if the sgl was allocated from the p2p pool.
>
> This is the crash that was caused:
>
> [Sun May 30 19:13:53 2021] ------------[ cut here ]------------
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> [Sun May 30 19:13:53 2021] invalid opcode: 0000 [#1] SMP PTI
> ...
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> ...
> [Sun May 30 19:13:53 2021] RIP: 0010:gen_pool_free_owner+0xa8/0xb0
> ...
> [Sun May 30 19:13:53 2021] Call Trace:
> [Sun May 30 19:13:53 2021] ------------[ cut here ]------------
> [Sun May 30 19:13:53 2021]  pci_free_p2pmem+0x2b/0x70
> [Sun May 30 19:13:53 2021]  pci_p2pmem_free_sgl+0x4f/0x80
> [Sun May 30 19:13:53 2021]  nvmet_req_free_sgls+0x1e/0x80 [nvmet]
> [Sun May 30 19:13:53 2021] kernel BUG at lib/genalloc.c:518!
> [Sun May 30 19:13:53 2021]  nvmet_rdma_release_rsp+0x4e/0x1f0 [nvmet_rdma]
> [Sun May 30 19:13:53 2021]  nvmet_rdma_send_done+0x1c/0x60 [nvmet_rdma]
>
> Fixes: c6e3f1339812 ("nvmet: add metadata support for block devices")
> Reviewed-by: Israel Rukshin <israelr@nvidia.com>
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>

Looks good.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>





* Re: [PATCH 1/1] nvmet: fix freeing unallocated p2pmem
  2021-06-01 16:22 [PATCH 1/1] nvmet: fix freeing unallocated p2pmem Max Gurtovoy
  2021-06-01 17:09 ` Logan Gunthorpe
  2021-06-01 18:25 ` Chaitanya Kulkarni
@ 2021-06-02  7:11 ` Christoph Hellwig
  2 siblings, 0 replies; 4+ messages in thread
From: Christoph Hellwig @ 2021-06-02  7:11 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: linux-nvme, sagi, kbusch, hch, chaitanya.kulkarni, oren, israelr, logang

Applied including the edit to streamline nvmet_req_find_p2p_dev.

