* [PATCH 1/1] nvme-tcp: check for invalid request
[not found] <CGME20240408102150epcas5p2d5b4c3460fca7fe58520254a2466271b@epcas5p2.samsung.com>
@ 2024-04-08 4:53 ` soni.ankit
2024-04-08 11:08 ` Sagi Grimberg
0 siblings, 1 reply; 3+ messages in thread
From: soni.ankit @ 2024-04-08 4:53 UTC (permalink / raw)
To: kbusch, axboe, hch, sagi
Cc: linux-nvme, sathya.m, d.palani, prakash.bv, anshul, ankitvvsoni,
Ankit Soni
From: Ankit Soni <soni.ankit@samsung.com>
The blk_mq_tag_to_rq() returns NULL for invalid tag.
Added a check condition to prevent a NULL pointer dereference.
Signed-off-by: Ankit Soni <soni.ankit@samsung.com>
---
drivers/nvme/host/tcp.c | 34 ++++++++++++++++++++++------------
1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index fdbcdcedcee9..01a90bbed70b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -801,9 +801,17 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- struct request *rq =
- nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_request *req;
+ struct request *rq;
+
+ rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (unlikely(!rq)) {
+ pr_err("could not locate request for tag %#x\n",
+ pdu->command_id);
+ return -EFAULT;
+ }
+
+ req = blk_mq_rq_to_pdu(rq);
while (true) {
int recv_len, ret;
@@ -875,6 +883,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
char *ddgst = (char *)&queue->recv_ddgst;
size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ struct nvme_tcp_request *req;
+ struct request *rq;
int ret;
ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
@@ -887,13 +897,17 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
if (queue->ddgst_remaining)
return 0;
- if (queue->recv_ddgst != queue->exp_ddgst) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (unlikely(!rq)) {
+ pr_err("could not locate request for tag %#x\n",
+ pdu->command_id);
+ return -EFAULT;
+ }
- req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+ req = blk_mq_rq_to_pdu(rq);
+ if (queue->recv_ddgst != queue->exp_ddgst) {
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
dev_err(queue->ctrl->ctrl.device,
"data digest error: recv %#x expected %#x\n",
le32_to_cpu(queue->recv_ddgst),
@@ -901,10 +915,6 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
- struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-
nvme_tcp_end_request(rq, le16_to_cpu(req->status));
queue->nr_cqe++;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH 1/1] nvme-tcp: check for invalid request
2024-04-08 4:53 ` [PATCH 1/1] nvme-tcp: check for invalid request soni.ankit
@ 2024-04-08 11:08 ` Sagi Grimberg
2024-04-16 2:43 ` Chaitanya Kulkarni
0 siblings, 1 reply; 3+ messages in thread
From: Sagi Grimberg @ 2024-04-08 11:08 UTC (permalink / raw)
To: soni.ankit, kbusch, axboe, hch
Cc: linux-nvme, sathya.m, d.palani, prakash.bv, anshul, ankitvvsoni
On 08/04/2024 7:53, soni.ankit@samsung.com wrote:
> From: Ankit Soni <soni.ankit@samsung.com>
>
> The blk_mq_tag_to_rq() returns NULL for invalid tag.
> Added a check condition to prevent a NULL pointer dereference.
Does this fix an actual bug? Or a theoretical one?
>
> Signed-off-by: Ankit Soni <soni.ankit@samsung.com>
> ---
> drivers/nvme/host/tcp.c | 34 ++++++++++++++++++++++------------
> 1 file changed, 22 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index fdbcdcedcee9..01a90bbed70b 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -801,9 +801,17 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> unsigned int *offset, size_t *len)
> {
> struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
> - struct request *rq =
> - nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
> - struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> + struct nvme_tcp_request *req;
> + struct request *rq;
> +
> + rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
> + if (unlikely(!rq)) {
> + pr_err("could not locate request for tag %#x\n",
> + pdu->command_id);
> + return -EFAULT;
> + }
> +
> + req = blk_mq_rq_to_pdu(rq);
We already checked the validity when processing the c2hdata PDU; there is
no need to do that again for each data payload chunk, since it does not
change.
>
> while (true) {
> int recv_len, ret;
> @@ -875,6 +883,8 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
> char *ddgst = (char *)&queue->recv_ddgst;
> size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
> off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
> + struct nvme_tcp_request *req;
> + struct request *rq;
> int ret;
>
> ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
> @@ -887,13 +897,17 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
> if (queue->ddgst_remaining)
> return 0;
>
> - if (queue->recv_ddgst != queue->exp_ddgst) {
> - struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
> - pdu->command_id);
> - struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> + rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
> + if (unlikely(!rq)) {
> + pr_err("could not locate request for tag %#x\n",
> + pdu->command_id);
> + return -EFAULT;
> + }
Same here: the ddgst comes as a trailer to a c2hdata PDU whose validity
we already checked. You are simply taking an already-validated PDU and
using it, so the check is redundant.
>
> - req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
> + req = blk_mq_rq_to_pdu(rq);
>
> + if (queue->recv_ddgst != queue->exp_ddgst) {
> + req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
> dev_err(queue->ctrl->ctrl.device,
> "data digest error: recv %#x expected %#x\n",
> le32_to_cpu(queue->recv_ddgst),
> @@ -901,10 +915,6 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
> }
>
> if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
> - struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
> - pdu->command_id);
> - struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> -
> nvme_tcp_end_request(rq, le16_to_cpu(req->status));
> queue->nr_cqe++;
> }
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH 1/1] nvme-tcp: check for invalid request
2024-04-08 11:08 ` Sagi Grimberg
@ 2024-04-16 2:43 ` Chaitanya Kulkarni
0 siblings, 0 replies; 3+ messages in thread
From: Chaitanya Kulkarni @ 2024-04-16 2:43 UTC (permalink / raw)
To: Sagi Grimberg, soni.ankit
Cc: linux-nvme, sathya.m, d.palani, axboe, prakash.bv, anshul,
ankitvvsoni, hch, kbusch
On 4/8/24 04:08, Sagi Grimberg wrote:
>
>
> On 08/04/2024 7:53, soni.ankit@samsung.com wrote:
>> From: Ankit Soni <soni.ankit@samsung.com>
>>
>> The blk_mq_tag_to_rq() returns NULL for invalid tag.
>> Added a check condition to prevent a NULL pointer dereference.
>
> Does this fix an actual bug? Or a theoretic one?
if it does fix an actual bug, then why not add a blktest? I don't
think we have this condition covered, because we are now triggering
an error path, unless I'm missing something ...
-ck
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2024-04-16 2:43 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <CGME20240408102150epcas5p2d5b4c3460fca7fe58520254a2466271b@epcas5p2.samsung.com>
2024-04-08 4:53 ` [PATCH 1/1] nvme-tcp: check for invalid request soni.ankit
2024-04-08 11:08 ` Sagi Grimberg
2024-04-16 2:43 ` Chaitanya Kulkarni
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).