From: Mike Christie <michael.christie@oracle.com>
To: lduncan@suse.com, cleech@redhat.com, njavali@marvell.com,
mrangankar@marvell.com, GR-QLogic-Storage-Upstream@marvell.com,
varun@chelsio.com, martin.petersen@oracle.com,
linux-scsi@vger.kernel.org, jejb@linux.ibm.com
Subject: [PATCH 09/22] qedi: implement alloc_task_priv/free_task_priv
Date: Thu, 17 Dec 2020 00:41:59 -0600 [thread overview]
Message-ID: <1608187332-4434-10-git-send-email-michael.christie@oracle.com> (raw)
In-Reply-To: <1608187332-4434-1-git-send-email-michael.christie@oracle.com>
Have qedi use the alloc_task_priv/free_task_priv callouts to allocate and
free per-task resources instead of rolling its own loops over the session's
command array.
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
drivers/scsi/qedi/qedi_iscsi.c | 106 ++++++++++++++++-------------------------
1 file changed, 41 insertions(+), 65 deletions(-)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 08c0540..a76f595 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -160,32 +160,30 @@ static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
return -ENOMEM;
}
-static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
- struct iscsi_session *session)
+static void qedi_free_sget(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
- int i;
+ if (!cmd->io_tbl.sge_tbl)
+ return;
- for (i = 0; i < session->cmds_max; i++) {
- struct iscsi_task *task = session->cmds[i];
- struct qedi_cmd *cmd = task->dd_data;
-
- if (cmd->io_tbl.sge_tbl)
- dma_free_coherent(&qedi->pdev->dev,
- QEDI_ISCSI_MAX_BDS_PER_CMD *
- sizeof(struct scsi_sge),
- cmd->io_tbl.sge_tbl,
- cmd->io_tbl.sge_tbl_dma);
-
- if (cmd->sense_buffer)
- dma_free_coherent(&qedi->pdev->dev,
- SCSI_SENSE_BUFFERSIZE,
- cmd->sense_buffer,
- cmd->sense_buffer_dma);
- }
+ dma_free_coherent(&qedi->pdev->dev,
+ QEDI_ISCSI_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
+ cmd->io_tbl.sge_tbl, cmd->io_tbl.sge_tbl_dma);
}
-static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
- struct qedi_cmd *cmd)
+static void qedi_free_task_priv(struct iscsi_session *session,
+ struct iscsi_task *task)
+{
+ struct qedi_ctx *qedi = iscsi_host_priv(session->host);
+ struct qedi_cmd *cmd = task->dd_data;
+
+ qedi_free_sget(qedi, cmd);
+
+ if (cmd->sense_buffer)
+ dma_free_coherent(&qedi->pdev->dev, SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer, cmd->sense_buffer_dma);
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
struct scsi_sge *sge;
@@ -195,8 +193,8 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
sizeof(*sge),
&io->sge_tbl_dma, GFP_KERNEL);
if (!io->sge_tbl) {
- iscsi_session_printk(KERN_ERR, session,
- "Could not allocate BD table.\n");
+ shost_printk(KERN_ERR, qedi->shost,
+ "Could not allocate BD table.\n");
return -ENOMEM;
}
@@ -204,33 +202,29 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
return 0;
}
-static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
- struct iscsi_session *session)
+static int qedi_alloc_task_priv(struct iscsi_session *session,
+ struct iscsi_task *task)
{
- int i;
+ struct qedi_ctx *qedi = iscsi_host_priv(session->host);
+ struct qedi_cmd *cmd = task->dd_data;
- for (i = 0; i < session->cmds_max; i++) {
- struct iscsi_task *task = session->cmds[i];
- struct qedi_cmd *cmd = task->dd_data;
+ task->hdr = &cmd->hdr;
+ task->hdr_max = sizeof(struct iscsi_hdr);
- task->hdr = &cmd->hdr;
- task->hdr_max = sizeof(struct iscsi_hdr);
+ if (qedi_alloc_sget(qedi, cmd))
+ return -ENOMEM;
- if (qedi_alloc_sget(qedi, session, cmd))
- goto free_sgets;
-
- cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
- SCSI_SENSE_BUFFERSIZE,
- &cmd->sense_buffer_dma,
- GFP_KERNEL);
- if (!cmd->sense_buffer)
- goto free_sgets;
- }
+ cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+ SCSI_SENSE_BUFFERSIZE,
+ &cmd->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!cmd->sense_buffer)
+ goto free_sgets;
return 0;
free_sgets:
- qedi_destroy_cmd_pool(qedi, session);
+ qedi_free_sget(qedi, cmd);
return -ENOMEM;
}
@@ -264,27 +258,7 @@ static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
return NULL;
}
- if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
- QEDI_ERR(&qedi->dbg_ctx,
- "Failed to setup cmd pool for ep=%p\n", qedi_ep);
- goto session_teardown;
- }
-
return cls_session;
-
-session_teardown:
- iscsi_session_teardown(cls_session);
- return NULL;
-}
-
-static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
-{
- struct iscsi_session *session = cls_session->dd_data;
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct qedi_ctx *qedi = iscsi_host_priv(shost);
-
- qedi_destroy_cmd_pool(qedi, session);
- iscsi_session_teardown(cls_session);
}
static struct iscsi_cls_conn *
@@ -1398,7 +1372,7 @@ struct iscsi_transport qedi_iscsi_transport = {
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
.create_session = qedi_session_create,
- .destroy_session = qedi_session_destroy,
+ .destroy_session = iscsi_session_teardown,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
.start_conn = qedi_conn_start,
@@ -1410,6 +1384,8 @@ struct iscsi_transport qedi_iscsi_transport = {
.get_session_param = iscsi_session_get_param,
.get_host_param = qedi_host_get_param,
.send_pdu = iscsi_conn_send_pdu,
+ .alloc_task_priv = qedi_alloc_task_priv,
+ .free_task_priv = qedi_free_task_priv,
.get_stats = qedi_conn_get_stats,
.xmit_task = qedi_task_xmit,
.cleanup_task = qedi_cleanup_task,
@@ -1625,7 +1601,7 @@ void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess)
qedi_conn_destroy(qedi_conn->cls_conn);
- qedi_session_destroy(cls_sess);
+ iscsi_session_teardown(cls_sess);
}
void qedi_process_tcp_error(struct qedi_endpoint *ep,
--
1.8.3.1
next prev parent reply other threads:[~2020-12-17 6:43 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-12-17 6:41 [RFC PATCH 00/22 V3] iscsi: lock clean ups Mike Christie
2020-12-17 6:41 ` [PATCH 01/22] libiscsi: fix iscsi_prep_scsi_cmd_pdu error handling Mike Christie
2020-12-17 6:41 ` [PATCH 02/22] libiscsi: drop taskqueuelock Mike Christie
2020-12-17 6:41 ` [PATCH 03/22] libiscsi: fix iscsi_task use after free Mike Christie
2020-12-17 6:41 ` [PATCH 04/22] qla4xxx: use iscsi_is_session_online Mike Christie
2020-12-17 6:41 ` [PATCH 05/22] iscsi class: drop session lock in iscsi_session_chkready Mike Christie
2020-12-17 6:41 ` [PATCH 06/22] libiscsi: remove queued_cmdsn Mike Christie
2020-12-17 6:41 ` [PATCH 07/22] libiscsi: drop frwd lock for session state Mike Christie
2020-12-17 6:41 ` [PATCH 08/22] libiscsi: add task prealloc/free callouts Mike Christie
2020-12-17 6:41 ` Mike Christie [this message]
2020-12-17 6:42 ` [PATCH 10/22] bnx2i: implement alloc_task_priv/free_task_priv Mike Christie
2020-12-17 6:42 ` [PATCH 11/22] iser, be2iscsi, qla4xxx: set scsi_host_template cmd_size Mike Christie
2020-12-17 6:42 ` [PATCH 12/22] bnx2i: " Mike Christie
2020-12-17 6:42 ` [PATCH 13/22] qedi: " Mike Christie
2020-12-17 6:42 ` [PATCH 14/22] iscsi_tcp, libcxgbi: " Mike Christie
2020-12-17 6:42 ` [PATCH 15/22] libiscsi: use scsi_host_busy_iter Mike Christie
2020-12-17 6:42 ` [PATCH 16/22] be2iscsi: " Mike Christie
2020-12-17 6:42 ` [PATCH 17/22] bnx2i: prep driver for switch to blk tags Mike Christie
2020-12-17 6:42 ` [PATCH 18/22] qedi: " Mike Christie
2020-12-17 6:42 ` [PATCH 19/22] libiscsi: use blk/scsi-ml mq cmd pre-allocator Mike Christie
2020-12-17 6:42 ` [PATCH 20/22] libiscsi: rm iscsi_put_task back_lock requirement Mike Christie
2020-12-17 6:42 ` [PATCH 21/22] libiscsi: drop back_lock from xmit path Mike Christie
2020-12-17 6:42 ` [PATCH 22/22] libiscsi: fix conn_send_pdu API Mike Christie
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1608187332-4434-10-git-send-email-michael.christie@oracle.com \
--to=michael.christie@oracle.com \
--cc=GR-QLogic-Storage-Upstream@marvell.com \
--cc=cleech@redhat.com \
--cc=jejb@linux.ibm.com \
--cc=lduncan@suse.com \
--cc=linux-scsi@vger.kernel.org \
--cc=martin.petersen@oracle.com \
--cc=mrangankar@marvell.com \
--cc=njavali@marvell.com \
--cc=varun@chelsio.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).