From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S935145AbbKTMtR (ORCPT );
	Fri, 20 Nov 2015 07:49:17 -0500
Received: from mail-wm0-f46.google.com ([74.125.82.46]:33272 "EHLO
	mail-wm0-f46.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1760160AbbKTMsG (ORCPT );
	Fri, 20 Nov 2015 07:48:06 -0500
From: =?UTF-8?q?Matias=20Bj=C3=B8rling?=
To: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org, axboe@fb.com
Cc: Wenwei Tao , =?UTF-8?q?Matias=20Bj=C3=B8rling?=
Subject: [PATCH 3/5] nvme: lightnvm: use admin queues for admin cmds
Date: Fri, 20 Nov 2015 13:47:55 +0100
Message-Id: <1448023677-22296-4-git-send-email-m@bjorling.me>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1448023677-22296-1-git-send-email-m@bjorling.me>
References: <1448023677-22296-1-git-send-email-m@bjorling.me>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

From: Wenwei Tao

According to the Open-Channel SSD Specification, the NVMe-NVM admin
commands use vendor-specific NVMe opcodes, so use the NVMe admin queue
to dispatch these commands.

Signed-off-by: Wenwei Tao

Updated by me to include the set bad block table command as well, and
to use the admin queue for the l2p len calculation.

Signed-off-by: Matias Bjørling
---
 drivers/nvme/host/lightnvm.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 7e82fe3..9202d1a 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -276,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 {
 	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
 	int ret;
@@ -288,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	if (!nvme_nvm_id)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
-				sizeof(struct nvme_nvm_id));
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+				nvme_nvm_id, sizeof(struct nvme_nvm_id));
 	if (ret) {
 		ret = -EIO;
 		goto out;
@@ -315,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	u32 len = queue_max_hw_sectors(q) << 9;
+	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
 	u32 nlb_pr_rq = len / sizeof(u64);
 	u64 cmd_slba = slba;
 	void *entries;
@@ -333,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-		ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
-				entries, len);
+		ret = nvme_submit_sync_cmd(dev->admin_q,
+				(struct nvme_command *)&c, entries, len);
 		if (ret) {
 			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
 									ret);
@@ -375,7 +376,8 @@ static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_tbl, tblsz);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								bb_tbl, tblsz);
 	if (ret) {
 		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
@@ -427,7 +429,8 @@ static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
 	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
 	c.set_bb.value = type;
 
-	ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
+	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+								NULL, 0);
 	if (ret)
 		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
-- 
2.1.4