* [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
@ 2020-06-11 15:53 Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 1/6] nvmet: Cleanup discovery execute handlers Dakshaja Uppalapati
                   ` (7 more replies)
  0 siblings, 8 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch; +Cc: stable, nirranjan, bharat, dakshaja

The below error is seen in dmesg while formatting the disks discovered on the host.

dmesg:
        [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
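
For context, a short sketch of the failure mode (illustrative only; the
identifiers are taken from the patches in this series, and the 4k buffer is
an assumed example):

	/* Payload length implied by the command: 16 bytes per discard range. */
	size_t dsm_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);

	/*
	 * req->transfer_len is parsed from the SGL descriptor and may
	 * legitimately be larger, e.g. a 4k buffer backing a single-range
	 * discard.  Before the fix, the target rejects the mismatch:
	 */
	if (dsm_len != req->transfer_len)
		status = NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;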

Patch 6 fixes it, and there are 5 other dependent patches that also need to be
pulled from upstream into the stable 5.4 and 4.19 branches.

Patch 1 dependent patch

Patch 2 dependent patch

Patch 3 dependent patch

Patch 4 dependent patch

Patch 5 dependent patch

Patch 6 fix patch

Thanks,
Dakshaja


Christoph Hellwig (5):
  nvmet: Cleanup discovery execute handlers
  nvmet: Introduce common execute function for get_log_page and identify
  nvmet: Introduce nvmet_dsm_len() helper
  nvmet: Remove the data_len field from the nvmet_req struct
  nvmet: Open code nvmet_req_execute()

Sagi Grimberg (1):
  nvmet: fix dsm failure when payload does not match sgl descriptor

 drivers/nvme/target/admin-cmd.c   | 128 +++++++++++++++++-------------
 drivers/nvme/target/core.c        |  23 ++++--
 drivers/nvme/target/discovery.c   |  62 +++++++--------
 drivers/nvme/target/fabrics-cmd.c |  15 +++-
 drivers/nvme/target/fc.c          |   4 +-
 drivers/nvme/target/io-cmd-bdev.c |  19 +++--
 drivers/nvme/target/io-cmd-file.c |  20 +++--
 drivers/nvme/target/loop.c        |   2 +-
 drivers/nvme/target/nvmet.h       |  11 ++-
 drivers/nvme/target/rdma.c        |   4 +-
 drivers/nvme/target/tcp.c         |   6 +-
 11 files changed, 176 insertions(+), 118 deletions(-)

-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 1/6] nvmet: Cleanup discovery execute handlers
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 2/6] nvmet: Introduce common execute function for get_log_page and identify Dakshaja Uppalapati
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, nirranjan, bharat, dakshaja, Logan Gunthorpe, Jens Axboe

Push the lid and cns check into their respective handlers and, while
we're at it, rename the functions to be consistent with other
discovery handlers.


Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/target/discovery.c | 44 ++++++++++++++-------------------
 1 file changed, 19 insertions(+), 25 deletions(-)

diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 3764a8900850..825e61e61b0c 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -157,7 +157,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
 	return entries;
 }
 
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -171,6 +171,13 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 	u16 status = 0;
 	void *buffer;
 
+	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+		req->error_loc =
+			offsetof(struct nvme_get_log_page_command, lid);
+		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		goto out;
+	}
+
 	/* Spec requires dword aligned offsets */
 	if (offset & 0x3) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
@@ -227,12 +234,18 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
 	u16 status = 0;
 
+	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+		req->error_loc = offsetof(struct nvme_identify, cns);
+		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		goto out;
+	}
+
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
 		status = NVME_SC_INTERNAL;
@@ -344,31 +357,12 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 		return 0;
 	case nvme_admin_get_log_page:
 		req->data_len = nvmet_get_log_page_len(cmd);
-
-		switch (cmd->get_log_page.lid) {
-		case NVME_LOG_DISC:
-			req->execute = nvmet_execute_get_disc_log_page;
-			return 0;
-		default:
-			pr_err("unsupported get_log_page lid %d\n",
-			       cmd->get_log_page.lid);
-			req->error_loc =
-				offsetof(struct nvme_get_log_page_command, lid);
-			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
-		}
+		req->execute = nvmet_execute_disc_get_log_page;
+		return 0;
 	case nvme_admin_identify:
 		req->data_len = NVME_IDENTIFY_DATA_SIZE;
-		switch (cmd->identify.cns) {
-		case NVME_ID_CNS_CTRL:
-			req->execute =
-				nvmet_execute_identify_disc_ctrl;
-			return 0;
-		default:
-			pr_err("unsupported identify cns %d\n",
-			       cmd->identify.cns);
-			req->error_loc = offsetof(struct nvme_identify, cns);
-			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
-		}
+		req->execute = nvmet_execute_disc_identify;
+		return 0;
 	default:
 		pr_err("unhandled cmd %d\n", cmd->common.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 2/6] nvmet: Introduce common execute function for get_log_page and identify
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 1/6] nvmet: Cleanup discovery execute handlers Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 3/6] nvmet: Introduce nvmet_dsm_len() helper Dakshaja Uppalapati
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, nirranjan, bharat, dakshaja, Logan Gunthorpe, Jens Axboe

Instead of picking the sub-command handler to execute in a nested
switch statement, introduce a landing function that calls out
to the appropriate sub-command handler.

This will allow us to have a common place in the handler to check
the transfer length in a future patch.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update change log]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/target/admin-cmd.c | 93 ++++++++++++++++++---------------
 1 file changed, 50 insertions(+), 43 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 831a062d27cb..3665b45d6515 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -282,6 +282,33 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+	switch (req->cmd->get_log_page.lid) {
+	case NVME_LOG_ERROR:
+		return nvmet_execute_get_log_page_error(req);
+	case NVME_LOG_SMART:
+		return nvmet_execute_get_log_page_smart(req);
+	case NVME_LOG_FW_SLOT:
+		/*
+		 * We only support a single firmware slot which always is
+		 * active, so we can zero out the whole firmware slot log and
+		 * still claim to fully implement this mandatory log page.
+		 */
+		return nvmet_execute_get_log_page_noop(req);
+	case NVME_LOG_CHANGED_NS:
+		return nvmet_execute_get_log_changed_ns(req);
+	case NVME_LOG_CMD_EFFECTS:
+		return nvmet_execute_get_log_cmd_effects_ns(req);
+	case NVME_LOG_ANA:
+		return nvmet_execute_get_log_page_ana(req);
+	}
+	pr_err("unhandled lid %d on qid %d\n",
+	       req->cmd->get_log_page.lid, req->sq->qid);
+	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -565,6 +592,25 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+	switch (req->cmd->identify.cns) {
+	case NVME_ID_CNS_NS:
+		return nvmet_execute_identify_ns(req);
+	case NVME_ID_CNS_CTRL:
+		return nvmet_execute_identify_ctrl(req);
+	case NVME_ID_CNS_NS_ACTIVE_LIST:
+		return nvmet_execute_identify_nslist(req);
+	case NVME_ID_CNS_NS_DESC_LIST:
+		return nvmet_execute_identify_desclist(req);
+	}
+
+	pr_err("unhandled identify cns %d on qid %d\n",
+	       req->cmd->identify.cns, req->sq->qid);
+	req->error_loc = offsetof(struct nvme_identify, cns);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
 /*
  * A "minimum viable" abort implementation: the command is mandatory in the
  * spec, but we are not required to do any useful work.  We couldn't really
@@ -819,52 +865,13 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 
 	switch (cmd->common.opcode) {
 	case nvme_admin_get_log_page:
+		req->execute = nvmet_execute_get_log_page;
 		req->data_len = nvmet_get_log_page_len(cmd);
-
-		switch (cmd->get_log_page.lid) {
-		case NVME_LOG_ERROR:
-			req->execute = nvmet_execute_get_log_page_error;
-			return 0;
-		case NVME_LOG_SMART:
-			req->execute = nvmet_execute_get_log_page_smart;
-			return 0;
-		case NVME_LOG_FW_SLOT:
-			/*
-			 * We only support a single firmware slot which always
-			 * is active, so we can zero out the whole firmware slot
-			 * log and still claim to fully implement this mandatory
-			 * log page.
-			 */
-			req->execute = nvmet_execute_get_log_page_noop;
-			return 0;
-		case NVME_LOG_CHANGED_NS:
-			req->execute = nvmet_execute_get_log_changed_ns;
-			return 0;
-		case NVME_LOG_CMD_EFFECTS:
-			req->execute = nvmet_execute_get_log_cmd_effects_ns;
-			return 0;
-		case NVME_LOG_ANA:
-			req->execute = nvmet_execute_get_log_page_ana;
-			return 0;
-		}
-		break;
+		return 0;
 	case nvme_admin_identify:
+		req->execute = nvmet_execute_identify;
 		req->data_len = NVME_IDENTIFY_DATA_SIZE;
-		switch (cmd->identify.cns) {
-		case NVME_ID_CNS_NS:
-			req->execute = nvmet_execute_identify_ns;
-			return 0;
-		case NVME_ID_CNS_CTRL:
-			req->execute = nvmet_execute_identify_ctrl;
-			return 0;
-		case NVME_ID_CNS_NS_ACTIVE_LIST:
-			req->execute = nvmet_execute_identify_nslist;
-			return 0;
-		case NVME_ID_CNS_NS_DESC_LIST:
-			req->execute = nvmet_execute_identify_desclist;
-			return 0;
-		}
-		break;
+		return 0;
 	case nvme_admin_abort_cmd:
 		req->execute = nvmet_execute_abort;
 		req->data_len = 0;
-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 3/6] nvmet: Introduce nvmet_dsm_len() helper
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 1/6] nvmet: Cleanup discovery execute handlers Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 2/6] nvmet: Introduce common execute function for get_log_page and identify Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 4/6] nvmet: Remove the data_len field from the nvmet_req struct Dakshaja Uppalapati
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, nirranjan, bharat, dakshaja, Logan Gunthorpe, Jens Axboe

Similar to the nvmet_rw_len helper.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/target/io-cmd-file.c | 3 +--
 drivers/nvme/target/nvmet.h       | 6 ++++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 05453f5d1448..7481556da6e6 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -379,8 +379,7 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_file_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
+		req->data_len = nvmet_dsm_len(req);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c51f8dd01dc4..6ccf2d098d9f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -495,6 +495,12 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
 			req->ns->blksize_shift;
 }
 
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+		sizeof(struct nvme_dsm_range);
+}
+
 u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
 
 /* Convert a 32-bit number to a 16-bit 0's based number */
-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 4/6] nvmet: Remove the data_len field from the nvmet_req struct
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
                   ` (2 preceding siblings ...)
  2020-06-11 15:53 ` [PATCH for-stable nvmet 3/6] nvmet: Introduce nvmet_dsm_len() helper Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 5/6] nvmet: Open code nvmet_req_execute() Dakshaja Uppalapati
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, nirranjan, bharat, dakshaja, Logan Gunthorpe, Jens Axboe

Instead of storing the expected length and checking it when it's
executed, just check the length inside the commands themselves.

A new helper, nvmet_check_data_len(), is created to help with this
check.
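
After this change every execute handler follows the same shape; a minimal
sketch (nvmet_execute_example and EXPECTED_LEN are placeholders, not symbols
from this series):

	static void nvmet_execute_example(struct nvmet_req *req)
	{
		/*
		 * Validate the SGL-described length first; on a mismatch
		 * nvmet_check_data_len() completes the request with
		 * NVME_SC_SGL_INVALID_DATA, so the handler must just return.
		 */
		if (!nvmet_check_data_len(req, EXPECTED_LEN))
			return;

		/* ... command-specific work, ending in nvmet_req_complete() ... */
	}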

Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/target/admin-cmd.c   | 35 +++++++++++++++++++++----------
 drivers/nvme/target/core.c        | 16 ++++++++++----
 drivers/nvme/target/discovery.c   | 18 ++++++++++------
 drivers/nvme/target/fabrics-cmd.c | 15 ++++++++++---
 drivers/nvme/target/io-cmd-bdev.c | 19 +++++++++++------
 drivers/nvme/target/io-cmd-file.c | 19 ++++++++++-------
 drivers/nvme/target/nvmet.h       |  3 +--
 7 files changed, 86 insertions(+), 39 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3665b45d6515..cd2c3a79f3b5 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -31,7 +31,7 @@ u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
 
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
-	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
 }
 
 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
@@ -134,7 +134,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
 	u16 status = NVME_SC_INTERNAL;
 	unsigned long flags;
 
-	if (req->data_len != sizeof(*log))
+	if (req->transfer_len != sizeof(*log))
 		goto out;
 
 	log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -196,7 +196,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
 	u16 status = NVME_SC_INTERNAL;
 	size_t len;
 
-	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
 		goto out;
 
 	mutex_lock(&ctrl->lock);
@@ -206,7 +206,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
 		len = ctrl->nr_changed_ns * sizeof(__le32);
 	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
 	if (!status)
-		status = nvmet_zero_sgl(req, len, req->data_len - len);
+		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
 	ctrl->nr_changed_ns = 0;
 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 	mutex_unlock(&ctrl->lock);
@@ -284,6 +284,9 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 
 static void nvmet_execute_get_log_page(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+		return;
+
 	switch (req->cmd->get_log_page.lid) {
 	case NVME_LOG_ERROR:
 		return nvmet_execute_get_log_page_error(req);
@@ -594,6 +597,9 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 
 static void nvmet_execute_identify(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+		return;
+
 	switch (req->cmd->identify.cns) {
 	case NVME_ID_CNS_NS:
 		return nvmet_execute_identify_ns(req);
@@ -620,6 +626,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)
  */
 static void nvmet_execute_abort(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, 0))
+		return;
 	nvmet_set_result(req, 1);
 	nvmet_req_complete(req, 0);
 }
@@ -704,6 +712,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	switch (cdw10 & 0xff) {
 	case NVME_FEAT_NUM_QUEUES:
 		nvmet_set_result(req,
@@ -767,6 +778,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	switch (cdw10 & 0xff) {
 	/*
 	 * These features are mandatory in the spec, but we don't
@@ -831,6 +845,9 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	mutex_lock(&ctrl->lock);
 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
 		mutex_unlock(&ctrl->lock);
@@ -847,6 +864,9 @@ void nvmet_execute_keep_alive(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
@@ -866,31 +886,24 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	switch (cmd->common.opcode) {
 	case nvme_admin_get_log_page:
 		req->execute = nvmet_execute_get_log_page;
-		req->data_len = nvmet_get_log_page_len(cmd);
 		return 0;
 	case nvme_admin_identify:
 		req->execute = nvmet_execute_identify;
-		req->data_len = NVME_IDENTIFY_DATA_SIZE;
 		return 0;
 	case nvme_admin_abort_cmd:
 		req->execute = nvmet_execute_abort;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_set_features:
 		req->execute = nvmet_execute_set_features;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_get_features:
 		req->execute = nvmet_execute_get_features;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_async_event:
 		req->execute = nvmet_execute_async_event;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_keep_alive:
 		req->execute = nvmet_execute_keep_alive;
-		req->data_len = 0;
 		return 0;
 	}
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 57a4062cbb59..f45cd324eafb 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -931,13 +931,21 @@ void nvmet_req_uninit(struct nvmet_req *req)
 }
 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
 
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 {
-	if (unlikely(req->data_len != req->transfer_len)) {
+	if (unlikely(data_len != req->transfer_len)) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
-	} else
-		req->execute(req);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(nvmet_check_data_len);
+
+void nvmet_req_execute(struct nvmet_req *req)
+{
+	req->execute(req);
 }
 EXPORT_SYMBOL_GPL(nvmet_req_execute);
 
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 825e61e61b0c..7a868c3e8e95 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -171,6 +171,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	u16 status = 0;
 	void *buffer;
 
+	if (!nvmet_check_data_len(req, data_len))
+		return;
+
 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lid);
@@ -240,6 +243,9 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	struct nvme_id_ctrl *id;
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+		return;
+
 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
 		req->error_loc = offsetof(struct nvme_identify, cns);
 		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -286,6 +292,9 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 stat;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	switch (cdw10 & 0xff) {
 	case NVME_FEAT_KATO:
 		stat = nvmet_set_feat_kato(req);
@@ -309,6 +318,9 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 stat = 0;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	switch (cdw10 & 0xff) {
 	case NVME_FEAT_KATO:
 		nvmet_get_feat_kato(req);
@@ -341,26 +353,20 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	switch (cmd->common.opcode) {
 	case nvme_admin_set_features:
 		req->execute = nvmet_execute_disc_set_features;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_get_features:
 		req->execute = nvmet_execute_disc_get_features;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_async_event:
 		req->execute = nvmet_execute_async_event;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_keep_alive:
 		req->execute = nvmet_execute_keep_alive;
-		req->data_len = 0;
 		return 0;
 	case nvme_admin_get_log_page:
-		req->data_len = nvmet_get_log_page_len(cmd);
 		req->execute = nvmet_execute_disc_get_log_page;
 		return 0;
 	case nvme_admin_identify:
-		req->data_len = NVME_IDENTIFY_DATA_SIZE;
 		req->execute = nvmet_execute_disc_identify;
 		return 0;
 	default:
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 4e9004fe5c6f..feef15c38ec9 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,6 +12,9 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
 	u64 val = le64_to_cpu(req->cmd->prop_set.value);
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	if (req->cmd->prop_set.attrib & 1) {
 		req->error_loc =
 			offsetof(struct nvmf_property_set_command, attrib);
@@ -38,6 +41,9 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 	u16 status = 0;
 	u64 val = 0;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	if (req->cmd->prop_get.attrib & 1) {
 		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
 		case NVME_REG_CAP:
@@ -82,11 +88,9 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
 
 	switch (cmd->fabrics.fctype) {
 	case nvme_fabrics_type_property_set:
-		req->data_len = 0;
 		req->execute = nvmet_execute_prop_set;
 		break;
 	case nvme_fabrics_type_property_get:
-		req->data_len = 0;
 		req->execute = nvmet_execute_prop_get;
 		break;
 	default:
@@ -152,6 +156,9 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	struct nvmet_ctrl *ctrl = NULL;
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+		return;
+
 	d = kmalloc(sizeof(*d), GFP_KERNEL);
 	if (!d) {
 		status = NVME_SC_INTERNAL;
@@ -216,6 +223,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	u16 qid = le16_to_cpu(c->qid);
 	u16 status = 0;
 
+	if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+		return;
+
 	d = kmalloc(sizeof(*d), GFP_KERNEL);
 	if (!d) {
 		status = NVME_SC_INTERNAL;
@@ -286,7 +296,6 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
 
-	req->data_len = sizeof(struct nvmf_connect_data);
 	if (cmd->connect.qid == 0)
 		req->execute = nvmet_execute_admin_connect;
 	else
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 32008d85172b..2d62347573fa 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -150,6 +150,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	sector_t sector;
 	int op, op_flags = 0, i;
 
+	if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+		return;
+
 	if (!req->sg_cnt) {
 		nvmet_req_complete(req, 0);
 		return;
@@ -170,7 +173,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	sector = le64_to_cpu(req->cmd->rw.slba);
 	sector <<= (req->ns->blksize_shift - 9);
 
-	if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
 		bio = &req->b.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
@@ -207,6 +210,9 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 {
 	struct bio *bio = &req->b.inline_bio;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_private = req;
@@ -274,6 +280,9 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+		return;
+
 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
 	case NVME_DSMGMT_AD:
 		nvmet_bdev_execute_discard(req);
@@ -295,6 +304,9 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	sector_t nr_sector;
 	int ret;
 
+	if (!nvmet_check_data_len(req, 0))
+		return;
+
 	sector = le64_to_cpu(write_zeroes->slba) <<
 		(req->ns->blksize_shift - 9);
 	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
@@ -319,20 +331,15 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 		req->execute = nvmet_bdev_execute_rw;
-		req->data_len = nvmet_rw_len(req);
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_bdev_execute_flush;
-		req->data_len = 0;
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_bdev_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
-		req->data_len = 0;
 		return 0;
 	default:
 		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 7481556da6e6..caebfce06605 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -126,7 +126,7 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 			mempool_free(req->f.bvec, req->ns->bvec_pool);
 	}
 
-	if (unlikely(ret != req->data_len))
+	if (unlikely(ret != req->transfer_len))
 		status = errno_to_nvme_status(req, ret);
 	nvmet_req_complete(req, status);
 }
@@ -146,7 +146,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 		is_sync = true;
 
 	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-	if (unlikely(pos + req->data_len > req->ns->size)) {
+	if (unlikely(pos + req->transfer_len > req->ns->size)) {
 		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
 		return true;
 	}
@@ -173,7 +173,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 		nr_bvec--;
 	}
 
-	if (WARN_ON_ONCE(total_len != req->data_len)) {
+	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
 		ret = -EIO;
 		goto complete;
 	}
@@ -232,6 +232,9 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
 	ssize_t nr_bvec = req->sg_cnt;
 
+	if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+		return;
+
 	if (!req->sg_cnt || !nr_bvec) {
 		nvmet_req_complete(req, 0);
 		return;
@@ -273,6 +276,8 @@ static void nvmet_file_flush_work(struct work_struct *w)
 
 static void nvmet_file_execute_flush(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, 0))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
 	schedule_work(&req->f.work);
 }
@@ -331,6 +336,8 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
 	schedule_work(&req->f.work);
 }
@@ -359,6 +366,8 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
 
 static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len(req, 0))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
 	schedule_work(&req->f.work);
 }
@@ -371,19 +380,15 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 		req->execute = nvmet_file_execute_rw;
-		req->data_len = nvmet_rw_len(req);
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_file_execute_flush;
-		req->data_len = 0;
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_file_execute_dsm;
-		req->data_len = nvmet_dsm_len(req);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
-		req->data_len = 0;
 		return 0;
 	default:
 		pr_err("unhandled cmd for file ns %d on qid %d\n",
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ccf2d098d9f..ff55f1005b35 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -304,8 +304,6 @@ struct nvmet_req {
 		} f;
 	};
 	int			sg_cnt;
-	/* data length as parsed from the command: */
-	size_t			data_len;
 	/* data length as parsed from the SGL descriptor: */
 	size_t			transfer_len;
 
@@ -375,6 +373,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
+bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
 void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 5/6] nvmet: Open code nvmet_req_execute()
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
                   ` (3 preceding siblings ...)
  2020-06-11 15:53 ` [PATCH for-stable nvmet 4/6] nvmet: Remove the data_len field from the nvmet_req struct Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-11 15:53 ` [PATCH for-stable nvmet 6/6] nvmet: fix dsm failure when payload does not match sgl descriptor Dakshaja Uppalapati
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, nirranjan, bharat, dakshaja, Logan Gunthorpe, Jens Axboe

Now that nvmet_req_execute does nothing, open code it.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/nvme/target/core.c  | 6 ------
 drivers/nvme/target/fc.c    | 4 ++--
 drivers/nvme/target/loop.c  | 2 +-
 drivers/nvme/target/nvmet.h | 1 -
 drivers/nvme/target/rdma.c  | 4 ++--
 drivers/nvme/target/tcp.c   | 6 +++---
 6 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f45cd324eafb..03b56ed7eca0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -943,12 +943,6 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
-void nvmet_req_execute(struct nvmet_req *req)
-{
-	req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..7f9c11138b93 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2015,7 +2015,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-		nvmet_req_execute(&fod->req);
+		fod->req.execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2231,7 +2231,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-	nvmet_req_execute(&fod->req);
+	fod->req.execute(&fod->req);
 	return;
 
 transport_error:
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..f4b878b113f7 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -126,7 +126,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	struct nvme_loop_iod *iod =
 		container_of(work, struct nvme_loop_iod, work);
 
-	nvmet_req_execute(&iod->req);
+	iod->req.execute(&iod->req);
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ff55f1005b35..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,7 +374,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
-void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..248e68da2e13 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	nvmet_req_execute(&rsp->req);
+	rsp->req.execute(&rsp->req);
 }
 
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 	} else {
-		nvmet_req_execute(&rsp->req);
+		rsp->req.execute(&rsp->req);
 	}
 
 	return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 22014e76d771..72777d558f1c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -938,7 +938,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 		goto out;
 	}
 
-	nvmet_req_execute(&queue->cmd->req);
+	queue->cmd->req.execute(&queue->cmd->req);
 out:
 	nvmet_prepare_receive_pdu(queue);
 	return ret;
@@ -1058,7 +1058,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 			nvmet_tcp_prep_recv_ddgst(cmd);
 			return 0;
 		}
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	}
 
 	nvmet_prepare_receive_pdu(queue);
@@ -1098,7 +1098,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
 	    cmd->rbytes_done == cmd->req.transfer_len)
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	ret = 0;
 out:
 	nvmet_prepare_receive_pdu(queue);
-- 
2.18.0.232.gb7bd9486b.dirty



* [PATCH for-stable nvmet 6/6] nvmet: fix dsm failure when payload does not match sgl descriptor
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
                   ` (4 preceding siblings ...)
  2020-06-11 15:53 ` [PATCH for-stable nvmet 5/6] nvmet: Open code nvmet_req_execute() Dakshaja Uppalapati
@ 2020-06-11 15:53 ` Dakshaja Uppalapati
  2020-06-17 13:54 ` [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
  2020-06-17 14:15 ` Keith Busch
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-11 15:53 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch; +Cc: stable, nirranjan, bharat, dakshaja

The host is allowed to pass the controller an sgl describing a buffer
that is larger than the dsm payload itself; allow it when executing
dsm.
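
For example, for a hypothetical single-range discard nvmet_dsm_len() is one
struct nvme_dsm_range, i.e. 16 bytes, while the host may back the command
with a page-sized buffer; the dsm handlers therefore switch to a
less-than-or-equal check:

	/* Sketch of the relaxed check used by the dsm handlers below: */
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return; /* request was already completed with an SGL error */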

Reported-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/target/core.c        | 11 +++++++++++
 drivers/nvme/target/io-cmd-bdev.c |  2 +-
 drivers/nvme/target/io-cmd-file.c |  2 +-
 drivers/nvme/target/nvmet.h       |  1 +
 4 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 03b56ed7eca0..7c91e1d64ed6 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -943,6 +943,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+	if (unlikely(data_len > req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		return false;
+	}
+
+	return true;
+}
+
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2d62347573fa..7a88fd75b79c 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 
 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce06605..cd5670b83118 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
-	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
 		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
 	schedule_work(&req->f.work);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e837c9..eda28b22a2c8 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
-- 
2.18.0.232.gb7bd9486b.dirty



* Re: [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
                   ` (5 preceding siblings ...)
  2020-06-11 15:53 ` [PATCH for-stable nvmet 6/6] nvmet: fix dsm failure when payload does not match sgl descriptor Dakshaja Uppalapati
@ 2020-06-17 13:54 ` Dakshaja Uppalapati
  2020-06-17 14:15 ` Keith Busch
  7 siblings, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-17 13:54 UTC (permalink / raw)
  To: eduard, kbusch, sagi, hch
  Cc: stable, Nirranjan Kirubaharan, Potnuri Bharat Teja, Dakshaja Uppalapati

On Thursday, June 11, 2020 at 21:23:33 +0530, Dakshaja Uppalapati wrote:
> The below error is seen in dmesg while formatting the disks discovered on the host.
> 
> dmesg:
>         [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
> 
> Patch 6 fixes it, and there are 5 other dependent patches that also need to be
> pulled from upstream into the stable 5.4 and 4.19 branches.
> 
> Patch 1 dependent patch
> 
> Patch 2 dependent patch
> 
> Patch 3 dependent patch
> 
> Patch 4 dependent patch
> 
> Patch 5 dependent patch
> 
> Patch 6 fix patch

Hi all,

Gentle Reminder.

Thanks,
Dakshaja


* Re: [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
  2020-06-11 15:53 [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
                   ` (6 preceding siblings ...)
  2020-06-17 13:54 ` [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error Dakshaja Uppalapati
@ 2020-06-17 14:15 ` Keith Busch
  2020-06-22 11:21   ` Dakshaja Uppalapati
  7 siblings, 1 reply; 14+ messages in thread
From: Keith Busch @ 2020-06-17 14:15 UTC (permalink / raw)
  To: Dakshaja Uppalapati; +Cc: eduard, sagi, hch, stable, nirranjan, bharat

On Thu, Jun 11, 2020 at 09:23:33PM +0530, Dakshaja Uppalapati wrote:
> The below error is seen in dmesg while formatting the disks discovered on the host.
> 
> dmesg:
>         [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
> 
> Patch 6 fixes it, and there are 5 other dependent patches that also need to be
> pulled from upstream into the stable 5.4 and 4.19 branches.
> 
> Patch 1 dependent patch
> 
> Patch 2 dependent patch
> 
> Patch 3 dependent patch
> 
> Patch 4 dependent patch
> 
> Patch 5 dependent patch
> 
> Patch 6 fix patch

1. You need to copy the linux-nvme mailing list for linux nvme kernel patches.

2. If you're sending someone else's patch, the patch is supposed to have
the From: tag so the author is appropriately identified.

3. Stable patches must reference the upstream commit ID.
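
For reference, the conventional form (per the stable kernel rules; the sha1
below is a placeholder, not a real commit) is a first changelog line of:

	commit <upstream sha1> upstream.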

As for this particular issue, while stable patches are required to
reference an upstream commit, you don't need to bring in dependent
patches. You are allowed to write an equivalent fix specific to the
stable branch so that stable doesn't need to take a bunch of unrelated
changes. For example, it looks like this particular issue can be fixed
with the following simple stable patch:

---
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 1096dd01ca22..9399e23e69c3 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -234,8 +234,8 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_bdev_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
+		req->data_len = max((le32_to_cpu(cmd->dsm.nr) + 1) *
+			sizeof(struct nvme_dsm_range), req->transfer_len);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 39d972e2595f..f2fa6482d6dd 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -332,8 +332,8 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_file_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
+		req->data_len = max((le32_to_cpu(cmd->dsm.nr) + 1) *
+			sizeof(struct nvme_dsm_range), req->transfer_len);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
--


* Re: [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
  2020-06-17 14:15 ` Keith Busch
@ 2020-06-22 11:21   ` Dakshaja Uppalapati
  2020-06-29 13:55       ` Dakshaja Uppalapati
  2020-09-04 11:33       ` Greg KH
  0 siblings, 2 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-22 11:21 UTC (permalink / raw)
  To: Keith Busch
  Cc: linux-nvme, eduard, sagi, hch, stable, Nirranjan Kirubaharan,
	Potnuri Bharat Teja

On Wednesday, June 17, 2020 at 07:15:41 -0700, Keith Busch wrote:
> On Thu, Jun 11, 2020 at 09:23:33PM +0530, Dakshaja Uppalapati wrote:
> > The below error is seen in dmesg while formatting the disks discovered on the host.
> > 
> > dmesg:
> >         [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
> > 
> > Patch 6 fixes it, and there are 5 other dependent patches that also need to be
> > pulled from upstream into the stable 5.4 and 4.19 branches.
> > 
> > Patch 1 dependent patch
> > 
> > Patch 2 dependent patch
> > 
> > Patch 3 dependent patch
> > 
> > Patch 4 dependent patch
> > 
> > Patch 5 dependent patch
> > 
> > Patch 6 fix patch
> 
> 1. You need to copy the linux-nvme mailing list for linux nvme kernel patches.
> 
> 2. If you're sending someone else's patch, the patch is supposed to have
> the From: tag so the author is appropriately identified.
> 
> 3. Stable patches must reference the upstream commit ID.
> 
> As for this particular issue, while stable patches are required to
> reference an upstream commit, you don't need to bring in dependent
> patches. You are allowed to write an equivalent fix specific to the
> stable branch so that stable doesn't need to take a bunch of unrelated
> changes. For example, it looks like this particular issue can be fixed
> with the following simple stable patch:
>

Hi Keith,

Thanks for the review.

I initially tried pushing only the fix + the required portion of the dependent
patches (https://www.spinics.net/lists/stable/msg387744.html), but as that
approach is discouraged in the stable tree, I submitted all the patches as they are.

Here are the ways to fix the issue in the stable tree:

•  push fix + all dependent patches
•  push fix + custom patch of dependent patches
•  revert the culprit patch.

Please let me know how this issue can be resolved in the stable tree.

Thanks,
Dakshaja



* Re: [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
  2020-06-22 11:21   ` Dakshaja Uppalapati
@ 2020-06-29 13:55       ` Dakshaja Uppalapati
  2020-09-04 11:33       ` Greg KH
  1 sibling, 0 replies; 14+ messages in thread
From: Dakshaja Uppalapati @ 2020-06-29 13:55 UTC (permalink / raw)
  To: Keith Busch
  Cc: linux-nvme, eduard, sagi, hch, stable, Nirranjan Kirubaharan,
	Potnuri Bharat Teja

On Monday, June 22, 2020 at 16:51:43 +0530, Dakshaja Uppalapati wrote:
> On Wednesday, June 17, 2020 at 07:15:41 -0700, Keith Busch wrote:
> > On Thu, Jun 11, 2020 at 09:23:33PM +0530, Dakshaja Uppalapati wrote:
> > > The below error is seen in dmesg while formatting the disks discovered on the host.
> > > 
> > > dmesg:
> > >         [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
> > > 
> > > Patch 6 fixes it, and there are 5 other dependent patches that also need to be
> > > pulled from upstream into the stable 5.4 and 4.19 branches.
> > > 
> > > Patch 1 dependent patch
> > > 
> > > Patch 2 dependent patch
> > > 
> > > Patch 3 dependent patch
> > > 
> > > Patch 4 dependent patch
> > > 
> > > Patch 5 dependent patch
> > > 
> > > Patch 6 fix patch
> > 
> > 1. You need to copy the linux-nvme mailing list for linux nvme kernel patches.
> > 
> > 2. If you're sending someone else's patch, the patch is supposed to have
> > the From: tag so the author is appropriately identified.
> > 
> > 3. Stable patches must reference the upstream commit ID.
> > 
> > As for this particular issue, while stable patches are required to
> > reference an upstream commit, you don't need to bring in dependent
> > patches. You are allowed to write an equivalent fix specific to the
> > stable branch so that stable doesn't need to take a bunch of unrelated
> > changes. For example, it looks like this particular issue can be fixed
> > with the following simple stable patch:
> >
> 
> Hi Keith,
> 
> Thanks for the review.
> 
> I initially tried pushing only the fix + the required portion of the dependent
> patches (https://www.spinics.net/lists/stable/msg387744.html), but as that
> approach is discouraged in the stable tree, I submitted all the patches as they are.
> 
> Here are the ways to fix the issue in the stable tree:
> 
> •  push fix + all dependent patches
> •  push fix + custom patch of dependent patches
> •  revert the culprit patch.
> 
> Please let me know how this issue can be resolved in the stable tree.
>

Hi Keith,

Gentle reminder.

Thanks,
Dakshaja



* Re: [PATCH for-stable nvmet 0/6] nvme: Fix for blk_update_request IO error.
  2020-06-22 11:21   ` Dakshaja Uppalapati
@ 2020-09-04 11:33       ` Greg KH
  1 sibling, 0 replies; 14+ messages in thread
From: Greg KH @ 2020-09-04 11:33 UTC (permalink / raw)
  To: Dakshaja Uppalapati
  Cc: Keith Busch, linux-nvme, eduard, sagi, hch, stable,
	Nirranjan Kirubaharan, Potnuri Bharat Teja

On Mon, Jun 22, 2020 at 04:51:44PM +0530, Dakshaja Uppalapati wrote:
> On Wednesday, June 17, 2020 at 07:15:41 -0700, Keith Busch wrote:
> > On Thu, Jun 11, 2020 at 09:23:33PM +0530, Dakshaja Uppalapati wrote:
> > > The below error is seen in dmesg while formatting the disks discovered on the host.
> > > 
> > > dmesg:
> > >         [  636.733374] blk_update_request: I/O error, dev nvme4n1, sector 0 op 0x3:(DISCARD) flags 0x800 phys_seg 1 prio class 0
> > > 
> > > Patch 6 fixes it, and there are 5 other dependent patches that also need to be
> > > pulled from upstream into the stable 5.4 and 4.19 branches.
> > > 
> > > Patch 1 dependent patch
> > > 
> > > Patch 2 dependent patch
> > > 
> > > Patch 3 dependent patch
> > > 
> > > Patch 4 dependent patch
> > > 
> > > Patch 5 dependent patch
> > > 
> > > Patch 6 fix patch
> > 
> > 1. You need to copy the linux-nvme mailing list for linux nvme kernel patches.
> > 
> > 2. If you're sending someone else's patch, the patch is supposed to have
> > the From: tag so the author is appropriately identified.
> > 
> > 3. Stable patches must reference the upstream commit ID.
> > 
> > As for this particular issue, while stable patches are required to
> > reference an upstream commit, you don't need to bring in dependent
> > patches. You are allowed to write an equivalent fix specific to the
> > stable branch so that stable doesn't need to take a bunch of unrelated
> > changes. For example, it looks like this particular issue can be fixed
> > with the following simple stable patch:
> >
> 
> Hi Keith,
> 
> Thanks for the review.
> 
> I initially tried pushing only the fix + the required portion of the dependent
> patches (https://www.spinics.net/lists/stable/msg387744.html), but as that
> approach is discouraged in the stable tree, I submitted all the patches as they are.
> 
> Here are the ways to fix the issue in the stable tree:
> 
> •  push fix + all dependent patches
> •  push fix + custom patch of dependent patches
> •  revert the culprit patch.
> 
> Please let me know how this issue can be resolved in the stable tree.

Is this still an issue?

I can't take a series of patches that is not in Linus's tree, like you
sent here; that's a non-starter.

If you want specific commits to be applied, that is one thing, and if
you need a specific patch to be reverted because it breaks things,
please provide that git id.

confused,

greg k-h


