* [PATCHv3 0/5] Asynchronous events for udev
@ 2017-11-07 22:13 Keith Busch
  2017-11-07 22:13 ` [PATCHv3 1/5] nvme: Centralize AEN defines Keith Busch
                   ` (4 more replies)
  0 siblings, 5 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


This series updates asynchronous event notification throughout the driver:
it centralizes the common definitions, simplifies the code down to a single
outstanding asynchronous event request, and finally emits a change uevent
when the event result matches a whitelist of events to notify udev about.
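
For anyone wanting to try this from userspace, a minimal consumer might look
like the libudev sketch below. Only the NVME_AEN property name comes from
this series (patch 5/5); the "nvme" subsystem match and everything else in
the program are illustrative assumptions, not part of the patches:

/* Hypothetical monitor: print the NVME_AEN result dword carried by the
 * "change" uevents this series emits.  Build with -ludev; error handling
 * is kept minimal on purpose.
 */
#include <stdio.h>
#include <poll.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon;
	int fd;

	if (!udev)
		return 1;

	mon = udev_monitor_new_from_netlink(udev, "udev");
	udev_monitor_filter_add_match_subsystem_devtype(mon, "nvme", NULL);
	udev_monitor_enable_receiving(mon);
	fd = udev_monitor_get_fd(mon);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		struct udev_device *dev;
		const char *aen;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		aen = udev_device_get_property_value(dev, "NVME_AEN");
		if (aen)
			printf("%s: AEN result %s\n",
			       udev_device_get_sysname(dev), aen);
		udev_device_unref(dev);
	}
}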

v2 -> v3:
  Merged to current nvme-4.15

  Use a whitelist instead of notifying all unhandled events

  Added define for the blk-mq tagset queue_depth (patch 1/5)

  Added reviews from previous round

Keith Busch (5):
  nvme: Centralize AEN defines
  nvme/fc: remove unused "queue_size" field
  nvme: Single AEN request
  nvme: Unexport starting async event work
  nvme: Send uevent for some asynchronous events

 drivers/nvme/host/core.c   | 62 ++++++++++++++++++++++++----------------------
 drivers/nvme/host/fc.c     | 47 ++++++++++++-----------------------
 drivers/nvme/host/nvme.h   |  6 ++---
 drivers/nvme/host/pci.c    | 18 +++-----------
 drivers/nvme/host/rdma.c   | 19 +++-----------
 drivers/nvme/target/loop.c | 16 +++---------
 include/linux/nvme.h       | 12 +++++++++
 7 files changed, 75 insertions(+), 105 deletions(-)

-- 
2.13.6


* [PATCHv3 1/5] nvme: Centralize AEN defines
  2017-11-07 22:13 [PATCHv3 0/5] Asynchronous events for udev Keith Busch
@ 2017-11-07 22:13 ` Keith Busch
  2017-11-08  1:37   ` Guan Junxiong
  2017-11-09  9:23   ` Christoph Hellwig
  2017-11-07 22:13 ` [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field Keith Busch
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


All the transports were unnecessarily duplicating the AEN request
accounting. This patch defines everything in one place.
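
For reference only, with the existing NVME_AQ_DEPTH of 32 the shared
constants introduced below work out as follows; the numbers are just the
arithmetic spelled out, not additional defines:

/*
 * Illustration of how the centralized constants relate:
 *
 *   NVME_AQ_DEPTH          32   admin queue depth
 *   NVME_NR_AEN_COMMANDS    1   AENs handled outside blk-mq
 *   NVME_AQ_BLK_MQ_DEPTH   31   = NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS
 *   NVME_AQ_MQ_TAG_DEPTH   30   = NVME_AQ_BLK_MQ_DEPTH - 1, leaving one
 *                               entry free for the 'Full Queue' condition
 *
 * AEN commands then take command IDs starting at NVME_AQ_BLK_MQ_DEPTH,
 * i.e. just past the blk-mq tag space:
 *
 *   command_id = NVME_AQ_BLK_MQ_DEPTH + i,  0 <= i < NVME_NR_AEN_COMMANDS
 */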

Signed-off-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/core.c   |  2 +-
 drivers/nvme/host/fc.c     | 33 ++++++++++++---------------------
 drivers/nvme/host/nvme.h   |  1 -
 drivers/nvme/host/pci.c    | 16 +++-------------
 drivers/nvme/host/rdma.c   | 14 +++-----------
 drivers/nvme/target/loop.c | 14 +++-----------
 include/linux/nvme.h       |  8 ++++++++
 7 files changed, 30 insertions(+), 58 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6f20c3a5673f..a3e2710be7c8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2792,7 +2792,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-	ctrl->event_limit = NVME_NR_AERS;
+	ctrl->event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 113c30be7276..2c54b0e114ed 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -30,15 +30,6 @@
 /* *************************** Data Structures/Defines ****************** */
 
 
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_FC_NR_AEN_COMMANDS	1
-#define NVME_FC_AQ_BLKMQ_DEPTH	\
-	(NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
-#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
-
 enum nvme_fc_queue_flags {
 	NVME_FC_Q_CONNECTED = (1 << 0),
 };
@@ -167,7 +158,7 @@ struct nvme_fc_ctrl {
 	u32			iocnt;
 	wait_queue_head_t	ioabort_wait;
 
-	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
+	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];
 
 	struct nvme_ctrl	ctrl;
 };
@@ -1533,7 +1524,7 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 	unsigned long flags;
 	int i, ret;
 
-	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
 		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
 			continue;
 
@@ -1803,7 +1794,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 	int i, ret;
 
 	aen_op = ctrl->aen_ops;
-	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
 		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
 						GFP_KERNEL);
 		if (!private)
@@ -1813,7 +1804,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 		sqe = &cmdiu->sqe;
 		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
 				aen_op, (struct request *)NULL,
-				(AEN_CMDID_BASE + i));
+				(NVME_AQ_BLK_MQ_DEPTH + i));
 		if (ret) {
 			kfree(private);
 			return ret;
@@ -1826,7 +1817,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
 		memset(sqe, 0, sizeof(*sqe));
 		sqe->common.opcode = nvme_admin_async_event;
 		/* Note: core layer may overwrite the sqe.command_id value */
-		sqe->common.command_id = AEN_CMDID_BASE + i;
+		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
 	}
 	return 0;
 }
@@ -1838,7 +1829,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	int i;
 
 	aen_op = ctrl->aen_ops;
-	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
 		if (!aen_op->fcp_req.private)
 			continue;
 
@@ -2389,7 +2380,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	bool terminating = false;
 	blk_status_t ret;
 
-	if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
+	if (aer_idx > NVME_NR_AEN_COMMANDS)
 		return;
 
 	spin_lock_irqsave(&ctrl->lock, flags);
@@ -2651,16 +2642,16 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * Create the admin queue
 	 */
 
-	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+	nvme_fc_init_queue(ctrl, 0, NVME_AQ_BLK_MQ_DEPTH);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_FC_AQ_BLKMQ_DEPTH);
+				NVME_AQ_BLK_MQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_FC_AQ_BLKMQ_DEPTH,
-				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
+				NVME_AQ_BLK_MQ_DEPTH,
+				(NVME_AQ_BLK_MQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
 
@@ -3065,7 +3056,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
 	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f7c21cea9485..a6d750cfa6b2 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -313,7 +313,6 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 		bool send);
 
-#define NVME_NR_AERS	1
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 32c413ec818c..c3dfd84feef7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -35,12 +35,6 @@
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)
-
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
 static int use_threaded_interrupts;
@@ -956,7 +950,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 	 * for them but rather special case them here.
 	 */
 	if (unlikely(nvmeq->qid == 0 &&
-			cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
@@ -1057,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
+	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &c);
@@ -1524,11 +1518,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 		dev->admin_tagset.ops = &nvme_mq_admin_ops;
 		dev->admin_tagset.nr_hw_queues = 1;
 
-		/*
-		 * Subtract one to leave an empty queue entry for 'Full Queue'
-		 * condition. See NVM-Express 1.2 specification, section 4.1.2.
-		 */
-		dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
+		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
 		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
 		dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 03644ecf68d2..e92277304a8c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -41,14 +41,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_RDMA_NR_AEN_COMMANDS      1
-#define NVME_RDMA_AQ_BLKMQ_DEPTH       \
-	(NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
-
 struct nvme_rdma_device {
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
@@ -690,7 +682,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set = &ctrl->admin_tag_set;
 		memset(set, 0, sizeof(*set));
 		set->ops = &nvme_rdma_admin_mq_ops;
-		set->queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 		set->reserved_tags = 2; /* connect + keep-alive */
 		set->numa_node = NUMA_NO_NODE;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
@@ -1318,7 +1310,7 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->common.opcode = nvme_admin_async_event;
-	cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
 	nvme_rdma_set_sg_null(cmd);
 
@@ -1380,7 +1372,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 	 * for them but rather special case them here.
 	 */
 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index bc95c6ed531a..7258b796f209 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -23,14 +23,6 @@
 
 #define NVME_LOOP_MAX_SEGMENTS		256
 
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_LOOP_NR_AEN_COMMANDS	1
-#define NVME_LOOP_AQ_BLKMQ_DEPTH	\
-	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
-
 struct nvme_loop_iod {
 	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
@@ -112,7 +104,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	 * for them but rather special case them here.
 	 */
 	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
-			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
+			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	} else {
@@ -200,7 +192,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 
 	memset(&iod->cmd, 0, sizeof(iod->cmd));
 	iod->cmd.common.opcode = nvme_admin_async_event;
-	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
+	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 
 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
@@ -356,7 +348,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
 	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
+	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index fd1d4508a612..89ffa7eed2fd 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -90,6 +90,14 @@ enum {
 };
 
 #define NVME_AQ_DEPTH		32
+#define NVME_NR_AEN_COMMANDS	1
+#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+
+/*
+ * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
+ * NVM-Express 1.2 specification, section 4.1.2.
+ */
+#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)
 
 enum {
 	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
-- 
2.13.6


* [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field
  2017-11-07 22:13 [PATCHv3 0/5] Asynchronous events for udev Keith Busch
  2017-11-07 22:13 ` [PATCHv3 1/5] nvme: Centralize AEN defines Keith Busch
@ 2017-11-07 22:13 ` Keith Busch
       [not found]   ` <45c5bfad-4854-2e01-905e-bcd8cbf42314@broadcom.com>
                     ` (2 more replies)
  2017-11-07 22:13 ` [PATCHv3 3/5] nvme: Single AEN request Keith Busch
                   ` (2 subsequent siblings)
  4 siblings, 3 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


This was being saved in a structure, but never used anywhere. The queue
size is obtained through other means, so there's no reason to duplicate
this without a user for it.

Reviewed-by: Sagi Grimberg <sagi at grimberg.me>
Signed-off-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/fc.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 2c54b0e114ed..5905897995c7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -43,7 +43,6 @@ struct nvme_fc_queue {
 	struct device		*dev;
 	struct blk_mq_hw_ctx	*hctx;
 	void			*lldd_handle;
-	int			queue_size;
 	size_t			cmnd_capsule_len;
 	u32			qnum;
 	u32			rqcnt;
@@ -1873,7 +1872,7 @@ nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 }
 
 static void
-nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
 {
 	struct nvme_fc_queue *queue;
 
@@ -1889,8 +1888,6 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
 	else
 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
-	queue->queue_size = queue_size;
-
 	/*
 	 * Considered whether we should allocate buffers for all SQEs
 	 * and CQEs and dma map them - mapping their respective entries
@@ -2014,7 +2011,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
 	int i;
 
 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
-		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		nvme_fc_init_queue(ctrl, i);
 }
 
 static void
@@ -2642,7 +2639,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	 * Create the admin queue
 	 */
 
-	nvme_fc_init_queue(ctrl, 0, NVME_AQ_BLK_MQ_DEPTH);
+	nvme_fc_init_queue(ctrl, 0);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
 				NVME_AQ_BLK_MQ_DEPTH);
-- 
2.13.6


* [PATCHv3 3/5] nvme: Single AEN request
  2017-11-07 22:13 [PATCHv3 0/5] Asynchronous events for udev Keith Busch
  2017-11-07 22:13 ` [PATCHv3 1/5] nvme: Centralize AEN defines Keith Busch
  2017-11-07 22:13 ` [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field Keith Busch
@ 2017-11-07 22:13 ` Keith Busch
  2017-11-08  1:35   ` Guan Junxiong
  2017-11-09  9:25   ` Christoph Hellwig
  2017-11-07 22:13 ` [PATCHv3 4/5] nvme: Unexport starting async event work Keith Busch
  2017-11-07 22:13 ` [PATCHv3 5/5] nvme: Send uevent for some asynchronous events Keith Busch
  4 siblings, 2 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


The driver can handle tracking only one AEN request, so this patch
removes handling for multiple ones.
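
For readers following the thread, the resulting flow can be summarized as
below; this is only a restatement of the diff that follows, with the
function names taken from it:

/*
 * Single-AEN lifecycle after this patch:
 *
 *   nvme_start_ctrl() / nvme_queue_async_events()
 *       queue ctrl->async_event_work
 *
 *   nvme_async_event_work()
 *       ctrl->ops->submit_async_event(ctrl);    // submit the one AEN
 *
 *   nvme_complete_async_event()                 // AEN completed
 *       if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 *           return;                             // e.g. teardown: stay idle
 *       ...act on the result...
 *       queue_work(nvme_wq, &ctrl->async_event_work);   // re-arm
 */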

Reviewed-by: Christoph Hellwig <hch at lst.de>
Reviewed-by: James Smart <james.smart at broadcom.com>
Signed-off-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/core.c   | 28 +++-------------------------
 drivers/nvme/host/fc.c     |  9 +++------
 drivers/nvme/host/nvme.h   |  3 +--
 drivers/nvme/host/pci.c    |  4 ++--
 drivers/nvme/host/rdma.c   |  5 +----
 drivers/nvme/target/loop.c |  2 +-
 6 files changed, 11 insertions(+), 40 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a3e2710be7c8..179ae56c4ad0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2683,15 +2683,7 @@ static void nvme_async_event_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, async_event_work);
 
-	spin_lock_irq(&ctrl->lock);
-	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
-		int aer_idx = --ctrl->event_limit;
-
-		spin_unlock_irq(&ctrl->lock);
-		ctrl->ops->submit_async_event(ctrl, aer_idx);
-		spin_lock_irq(&ctrl->lock);
-	}
-	spin_unlock_irq(&ctrl->lock);
+	ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -2758,22 +2750,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
-	bool done = true;
 
-	switch (le16_to_cpu(status) >> 1) {
-	case NVME_SC_SUCCESS:
-		done = false;
-		/*FALLTHRU*/
-	case NVME_SC_ABORT_REQ:
-		++ctrl->event_limit;
-		if (ctrl->state == NVME_CTRL_LIVE)
-			queue_work(nvme_wq, &ctrl->async_event_work);
-		break;
-	default:
-		break;
-	}
-
-	if (done)
+	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
 	switch (result & 0xff07) {
@@ -2787,12 +2765,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		dev_warn(ctrl->device, "async event result %08x\n", result);
 	}
+	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-	ctrl->event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5905897995c7..b80392a97d9e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2369,7 +2369,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 }
 
 static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
 	struct nvme_fc_fcp_op *aen_op;
@@ -2377,9 +2377,6 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	bool terminating = false;
 	blk_status_t ret;
 
-	if (aer_idx > NVME_NR_AEN_COMMANDS)
-		return;
-
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (ctrl->flags & FCCTRL_TERMIO)
 		terminating = true;
@@ -2388,13 +2385,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	if (terminating)
 		return;
 
-	aen_op = &ctrl->aen_ops[aer_idx];
+	aen_op = &ctrl->aen_ops[0];
 
 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
 					NVMEFC_FCP_NODATA);
 	if (ret)
 		dev_err(ctrl->ctrl.device,
-			"failed async event work [%d]\n", aer_idx);
+			"failed async event work\n");
 }
 
 static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a6d750cfa6b2..b55c97ecea31 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -162,7 +162,6 @@ struct nvme_ctrl {
 	u16 nssa;
 	u16 nr_streams;
 	atomic_t abort_limit;
-	u8 event_limit;
 	u8 vwc;
 	u32 vs;
 	u32 sgls;
@@ -237,7 +236,7 @@ struct nvme_ctrl_ops {
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
-	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	int (*reinit_request)(void *data, struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c3dfd84feef7..429d56f1a19e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1043,7 +1043,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return __nvme_poll(nvmeq, tag);
 }
 
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1051,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
+	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &c);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e92277304a8c..5ba3f5304119 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1293,7 +1293,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
@@ -1303,9 +1303,6 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	struct ib_sge sge;
 	int ret;
 
-	if (WARN_ON_ONCE(aer_idx != 0))
-		return;
-
 	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
 
 	memset(cmd, 0, sizeof(*cmd));
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 7258b796f209..f40e70eb4a38 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
 	struct nvme_loop_queue *queue = &ctrl->queues[0];
-- 
2.13.6


* [PATCHv3 4/5] nvme: Unexport starting async event work
  2017-11-07 22:13 [PATCHv3 0/5] Asynchronous events for udev Keith Busch
                   ` (2 preceding siblings ...)
  2017-11-07 22:13 ` [PATCHv3 3/5] nvme: Single AEN request Keith Busch
@ 2017-11-07 22:13 ` Keith Busch
  2017-11-08  1:39   ` Guan Junxiong
  2017-11-07 22:13 ` [PATCHv3 5/5] nvme: Send uevent for some asynchronous events Keith Busch
  4 siblings, 1 reply; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


Async event work is for core use only and should not be called directly
from drivers.

Reviewed-by: Christoph Hellwig <hch at lst.de>
Signed-off-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/core.c | 8 +-------
 drivers/nvme/host/nvme.h | 1 -
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 179ae56c4ad0..f5e059af65cc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2769,12 +2769,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
-void nvme_queue_async_events(struct nvme_ctrl *ctrl)
-{
-	queue_work(nvme_wq, &ctrl->async_event_work);
-}
-EXPORT_SYMBOL_GPL(nvme_queue_async_events);
-
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_stop_keep_alive(ctrl);
@@ -2791,7 +2785,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
-		nvme_queue_async_events(ctrl);
+		queue_work(nvme_wq, &ctrl->async_event_work);
 		nvme_start_queues(ctrl);
 	}
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b55c97ecea31..151062573ece 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -314,7 +314,6 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		union nvme_result *res);
-void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
-- 
2.13.6


* [PATCHv3 5/5] nvme: Send uevent for some asynchronous events
  2017-11-07 22:13 [PATCHv3 0/5] Asynchronous events for udev Keith Busch
                   ` (3 preceding siblings ...)
  2017-11-07 22:13 ` [PATCHv3 4/5] nvme: Unexport starting async event work Keith Busch
@ 2017-11-07 22:13 ` Keith Busch
  2017-11-08  1:40   ` Guan Junxiong
  2017-11-09  9:27   ` Christoph Hellwig
  4 siblings, 2 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-07 22:13 UTC (permalink / raw)


This will give udev a chance to observe and handle asynchronous event
notifications and clear the log to unmask future events of the same type.
The driver will create a change uevent with the asynchronous event result
before submitting the next AEN request to the device if a completed AEN
event is of type error, smart, command set, or vendor specific.
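
For reference, the result dword of an Asynchronous Event Request completion
is laid out as below in NVMe 1.2; only the masks appear in the patch, and
the three helper functions are hypothetical, shown purely to make the
masking explicit:

/*
 * AEN completion result dword (NVMe 1.2, Asynchronous Event Request):
 *
 *   bits  2:0   Asynchronous Event Type (error, smart, notice, css, vendor)
 *   bits 15:8   Asynchronous Event Information
 *   bits 23:16  Log Page Identifier
 *
 * Hence "result & 0x7" selects the event type for the uevent whitelist,
 * and "result & 0xff07" matches type+information pairs such as
 * NVME_AER_NOTICE_NS_CHANGED (0x0002).
 */
static inline u32 nvme_aen_type(u32 result)     { return result & 0x7; }
static inline u32 nvme_aen_info(u32 result)     { return (result >> 8) & 0xff; }
static inline u32 nvme_aen_log_page(u32 result) { return (result >> 16) & 0xff; }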

Signed-off-by: Keith Busch <keith.busch at intel.com>
---
 drivers/nvme/host/core.c | 28 ++++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h |  1 +
 include/linux/nvme.h     |  4 ++++
 3 files changed, 33 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f5e059af65cc..486d02204c5d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2678,11 +2678,28 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
+static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
+{
+	char *envp[2] = {NULL, NULL};
+	u32 aen = ctrl->aen;
+
+	ctrl->aen = 0;
+	if (!aen)
+		return;
+
+	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen);
+	if (!envp[0])
+		return;
+	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+	kfree(envp[0]);
+}
+
 static void nvme_async_event_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, async_event_work);
 
+	nvme_aen_uevent(ctrl);
 	ctrl->ops->submit_async_event(ctrl);
 }
 
@@ -2754,6 +2771,17 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
+	switch (result & 0x7) {
+	case NVME_AER_ERROR:
+	case NVME_AER_SMART:
+	case NVME_AER_CSS:
+	case NVME_AER_VS:
+		ctrl->aen = result;
+		break;
+	default:
+		break;
+	}
+
 	switch (result & 0xff07) {
 	case NVME_AER_NOTICE_NS_CHANGED:
 		dev_info(ctrl->device, "rescanning\n");
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 151062573ece..b50ac356c3db 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -168,6 +168,7 @@ struct nvme_ctrl {
 	u16 kas;
 	u8 npss;
 	u8 apsta;
+	u32 aen;
 	unsigned int shutdown_timeout;
 	unsigned int kato;
 	bool subsystem;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 89ffa7eed2fd..aea87f0d917b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -428,6 +428,10 @@ enum {
 };
 
 enum {
+	NVME_AER_ERROR			= 0,
+	NVME_AER_SMART			= 1,
+	NVME_AER_CSS			= 6,
+	NVME_AER_VS			= 7,
 	NVME_AER_NOTICE_NS_CHANGED	= 0x0002,
 	NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
 };
-- 
2.13.6


* [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field
       [not found]   ` <45c5bfad-4854-2e01-905e-bcd8cbf42314@broadcom.com>
@ 2017-11-08  0:04     ` Keith Busch
  0 siblings, 0 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-08  0:04 UTC (permalink / raw)


On Tue, Nov 07, 2017 at 03:56:52PM -0800, James Smart wrote:
>    On 11/7/2017 2:13 PM, Keith Busch wrote:
> 
>  This was being saved in a structure, but never used anywhere. The queue
>  size is obtained through other means, so there's no reason to duplicate
>  this without a user for it.
> 
>  Reviewed-by: Sagi Grimberg <sagi at grimberg.me>
>  Signed-off-by: Keith Busch <keith.busch at intel.com>
>  ---
>   drivers/nvme/host/fc.c | 9 +++------
>   1 file changed, 3 insertions(+), 6 deletions(-)
> 
> 
>    I had reviewed the v2.
> 
>    Reviewed-by: James Smart <james.smart at broadcom.com>

Indeed, sorry about that, and thank you for the review.


* [PATCHv3 3/5] nvme: Single AEN request
  2017-11-07 22:13 ` [PATCHv3 3/5] nvme: Single AEN request Keith Busch
@ 2017-11-08  1:35   ` Guan Junxiong
  2017-11-09  9:24     ` Christoph Hellwig
  2017-11-09  9:25   ` Christoph Hellwig
  1 sibling, 1 reply; 20+ messages in thread
From: Guan Junxiong @ 2017-11-08  1:35 UTC (permalink / raw)



On 2017/11/8 6:13, Keith Busch wrote:
>  static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
> @@ -2758,22 +2750,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
>  		union nvme_result *res)
>  {
>  	u32 result = le32_to_cpu(res->u32);
> -	bool done = true;
>  
> -	switch (le16_to_cpu(status) >> 1) {
> -	case NVME_SC_SUCCESS:
> -		done = false;
> -		/*FALLTHRU*/
> -	case NVME_SC_ABORT_REQ:
> -		++ctrl->event_limit;
> -		if (ctrl->state == NVME_CTRL_LIVE)
> -			queue_work(nvme_wq, &ctrl->async_event_work);
> -		break;
> -	default:
> -		break;
> -	}
> -
> -	if (done)
> +	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
>  		return;
If le16_to_cpu(status) >> 1 is NVME_SC_ABORT_REQ, the current async event request is
aborted and no async event request remains pending at the target.
Why do you choose not to submit another async event request to the target?


* [PATCHv3 1/5] nvme: Centralize AEN defines
  2017-11-07 22:13 ` [PATCHv3 1/5] nvme: Centralize AEN defines Keith Busch
@ 2017-11-08  1:37   ` Guan Junxiong
  2017-11-09  9:23   ` Christoph Hellwig
  1 sibling, 0 replies; 20+ messages in thread
From: Guan Junxiong @ 2017-11-08  1:37 UTC (permalink / raw)



Reviewed-by: Guan Junxiong <guanjunxiong at huawei.com>

On 2017/11/8 6:13, Keith Busch wrote:
> All the transports were unnecessarily duplicating the AEN request
> accounting. This patch defines everything in one place.
> 
> Signed-off-by: Keith Busch <keith.busch at intel.com>


* [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field
  2017-11-07 22:13 ` [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field Keith Busch
       [not found]   ` <45c5bfad-4854-2e01-905e-bcd8cbf42314@broadcom.com>
@ 2017-11-08  1:38   ` Guan Junxiong
  2017-11-09  9:23   ` Christoph Hellwig
  2 siblings, 0 replies; 20+ messages in thread
From: Guan Junxiong @ 2017-11-08  1:38 UTC (permalink / raw)




On 2017/11/8 6:13, Keith Busch wrote:
> This was being saved in a structure, but never used anywhere. The queue
> size is obtained through other means, so there's no reason to duplicate
> this without a user for it.

Reviewed-by: Guan Junxiong <guanjunxiong at huawei.com>


* [PATCHv3 4/5] nvme: Unexport starting async event work
  2017-11-07 22:13 ` [PATCHv3 4/5] nvme: Unexport starting async event work Keith Busch
@ 2017-11-08  1:39   ` Guan Junxiong
  0 siblings, 0 replies; 20+ messages in thread
From: Guan Junxiong @ 2017-11-08  1:39 UTC (permalink / raw)



Reviewed-by: Guan Junxiong <guanjunxiong at huawei.com>

On 2017/11/8 6:13, Keith Busch wrote:
> Async event work is for core use only and should not be called directly
> from drivers.
> 
> Reviewed-by: Christoph Hellwig <hch at lst.de>
> Signed-off-by: Keith Busch <keith.busch at intel.com>


* [PATCHv3 5/5] nvme: Send uevent for some asynchronous events
  2017-11-07 22:13 ` [PATCHv3 5/5] nvme: Send uevent for some asynchronous events Keith Busch
@ 2017-11-08  1:40   ` Guan Junxiong
  2017-11-09  9:27   ` Christoph Hellwig
  1 sibling, 0 replies; 20+ messages in thread
From: Guan Junxiong @ 2017-11-08  1:40 UTC (permalink / raw)



Reviewed-by: Guan Junxiong <guanjunxiong at huawei.com>

On 2017/11/8 6:13, Keith Busch wrote:
> This will give udev a chance to observe and handle asynchronous event
> notifications and clear the log to unmask future events of the same type.
> The driver will create a change uevent of the asynchronous event result
> before submitting the next AEN request to the device if a completed AEN
> event is of type error, smart, command set or vendor specific,
> 
> Signed-off-by: Keith Busch <keith.busch at intel.com>


* [PATCHv3 1/5] nvme: Centralize AEN defines
  2017-11-07 22:13 ` [PATCHv3 1/5] nvme: Centralize AEN defines Keith Busch
  2017-11-08  1:37   ` Guan Junxiong
@ 2017-11-09  9:23   ` Christoph Hellwig
  1 sibling, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09  9:23 UTC (permalink / raw)


Looks fine:

Reviewed-by: Christoph Hellwig <hch at lst.de>

> diff --git a/include/linux/nvme.h b/include/linux/nvme.h
> index fd1d4508a612..89ffa7eed2fd 100644
> --- a/include/linux/nvme.h
> +++ b/include/linux/nvme.h
> @@ -90,6 +90,14 @@ enum {
>  };
>  
>  #define NVME_AQ_DEPTH		32
> +#define NVME_NR_AEN_COMMANDS	1
> +#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
> +
> +/*
> + * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
> + * NVM-Express 1.2 specification, section 4.1.2.
> + */
> +#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)

But none of these (including NVME_AQ_DEPTH) really is a protocol
constant, so this should probably move to drivers/nvme/host/nvme.h.

No need to do that in this patchseries, though.


* [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field
  2017-11-07 22:13 ` [PATCHv3 2/5] nvme/fc: remove unused "queue_size" field Keith Busch
       [not found]   ` <45c5bfad-4854-2e01-905e-bcd8cbf42314@broadcom.com>
  2017-11-08  1:38   ` Guan Junxiong
@ 2017-11-09  9:23   ` Christoph Hellwig
  2 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09  9:23 UTC (permalink / raw)


Looks fine,

Reviewed-by: Christoph Hellwig <hch at lst.de>


* [PATCHv3 3/5] nvme: Single AEN request
  2017-11-08  1:35   ` Guan Junxiong
@ 2017-11-09  9:24     ` Christoph Hellwig
  2017-11-09 16:04       ` Keith Busch
  0 siblings, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09  9:24 UTC (permalink / raw)


On Wed, Nov 08, 2017 at 09:35:39AM +0800, Guan Junxiong wrote:
> If  le16_to_cpu(status) >> 1 is NVME_SC_ABORT_REQ, the current async event request is
> aborted and there is no pending request in the queue of async event request of the target.
> Why do you choose not to submit another async event request to the target?

We're never going to send an abort for the AER, so this status code
will only happen if the controller is being torn down, in which case we
don't want to submit another one until the controller is set up again.


* [PATCHv3 3/5] nvme: Single AEN request
  2017-11-07 22:13 ` [PATCHv3 3/5] nvme: Single AEN request Keith Busch
  2017-11-08  1:35   ` Guan Junxiong
@ 2017-11-09  9:25   ` Christoph Hellwig
  1 sibling, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09  9:25 UTC (permalink / raw)


Looks good,

Reviewed-by: Christoph Hellwig <hch at lst.de>


* [PATCHv3 5/5] nvme: Send uevent for some asynchronous events
  2017-11-07 22:13 ` [PATCHv3 5/5] nvme: Send uevent for some asynchronous events Keith Busch
  2017-11-08  1:40   ` Guan Junxiong
@ 2017-11-09  9:27   ` Christoph Hellwig
  2017-11-09 16:18     ` Keith Busch
  1 sibling, 1 reply; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09  9:27 UTC (permalink / raw)


On Tue, Nov 07, 2017 at 03:13:14PM -0700, Keith Busch wrote:
> This will give udev a chance to observe and handle asynchronous event
> notifications and clear the log to unmask future events of the same type.
> > The driver will create a change uevent of the asynchronous event result
> before submitting the next AEN request to the device if a completed AEN
> event is of type error, smart, command set or vendor specific,
> 
> Signed-off-by: Keith Busch <keith.busch at intel.com>
> ---
>  drivers/nvme/host/core.c | 28 ++++++++++++++++++++++++++++
>  drivers/nvme/host/nvme.h |  1 +
>  include/linux/nvme.h     |  4 ++++
>  3 files changed, 33 insertions(+)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index f5e059af65cc..486d02204c5d 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -2678,11 +2678,28 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
>  }
>  EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
>  
> +static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
> +{
> +	char *envp[2] = {NULL, NULL};
> +	u32 aen = ctrl->aen;
> +
> +	ctrl->aen = 0;

Seems like we should use cmpxchg on aen.
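
Something along these lines, purely as a sketch of the idea (an xchg()
read-and-clear would do; a cmpxchg() loop would work just as well):

	u32 aen = xchg(&ctrl->aen, 0);	/* consume the result atomically */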

> +	if (!aen)
> +		return;
> +
> +	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen);
> +	if (!envp[0])
> +		return;
> +	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
> +	kfree(envp[0]);
> +}
> +
>  static void nvme_async_event_work(struct work_struct *work)
>  {
>  	struct nvme_ctrl *ctrl =
>  		container_of(work, struct nvme_ctrl, async_event_work);
>  
> +	nvme_aen_uevent(ctrl);
>  	ctrl->ops->submit_async_event(ctrl);
>  }
>  
> @@ -2754,6 +2771,17 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
>  	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
>  		return;
>  
> +	switch (result & 0x7) {
> +	case NVME_AER_ERROR:
> +	case NVME_AER_SMART:
> +	case NVME_AER_CSS:
> +	case NVME_AER_VS:
> +		ctrl->aen = result;

Can we call the field aen_result?

Given that the rest of the series looks good I'd be tempted to just
fix that up and apply it.  Are you ok with that?


* [PATCHv3 3/5] nvme: Single AEN request
  2017-11-09  9:24     ` Christoph Hellwig
@ 2017-11-09 16:04       ` Keith Busch
  0 siblings, 0 replies; 20+ messages in thread
From: Keith Busch @ 2017-11-09 16:04 UTC (permalink / raw)


On Thu, Nov 09, 2017 at 10:24:50AM +0100, Christoph Hellwig wrote:
> On Wed, Nov 08, 2017 at 09:35:39AM +0800, Guan Junxiong wrote:
> > If  le16_to_cpu(status) >> 1 is NVME_SC_ABORT_REQ, the current async event request is
> > aborted and there is no pending request in the queue of async event request of the target.
> > Why do you choose not to submit another async event request to the target?
> 
> We're never going to send an abort for the AER, so this status code
> will only happen if the controller is being torn down, in that case we
> don't want to submit another one until the controller is set up again.

Yeah, reclaiming the AER count on abort was a trick for controller resets
when we were tracking those requests. Back then, every reset you'd see
at least one "Cancelling I/O QID 0" message for AER commands. We've had
a better way to handle this for a while now, so that was left-over
dead code.


* [PATCHv3 5/5] nvme: Send uevent for some asynchronous events
  2017-11-09  9:27   ` Christoph Hellwig
@ 2017-11-09 16:18     ` Keith Busch
  2017-11-09 16:23       ` Christoph Hellwig
  0 siblings, 1 reply; 20+ messages in thread
From: Keith Busch @ 2017-11-09 16:18 UTC (permalink / raw)


On Thu, Nov 09, 2017 at 10:27:03AM +0100, Christoph Hellwig wrote:
> On Tue, Nov 07, 2017 at 03:13:14PM -0700, Keith Busch wrote:
> > +static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
> > +{
> > +	char *envp[2] = {NULL, NULL};
> > +	u32 aen = ctrl->aen;
> > +
> > +	ctrl->aen = 0;
> 
> Seems like we should use cmpxchg on aen.

> > +		ctrl->aen = result;
> 
> Can we call the field aen_result?
> 
> Given that the rest of the series looks good I'd be tempted to just
> fix that up and apply it.  Are you ok with that?

Thanks, the suggestions sound good to me. If you want to edit the commit
when applying, I would much appreciate.


* [PATCHv3 5/5] nvme: Send uevent for some asynchronous events
  2017-11-09 16:18     ` Keith Busch
@ 2017-11-09 16:23       ` Christoph Hellwig
  0 siblings, 0 replies; 20+ messages in thread
From: Christoph Hellwig @ 2017-11-09 16:23 UTC (permalink / raw)


On Thu, Nov 09, 2017 at 09:18:46AM -0700, Keith Busch wrote:
> > Seems like we should use cmpxchg on aen.
> 
> > > +		ctrl->aen = result;
> > 
> > Can we call the field aen_result?
> > 
> > Given that the rest of the series looks good I'd be tempted to just
> > fix that up and apply it.  Are you ok with that?
> 
> Thanks, the suggestions sound good to me. If you want to edit the commit
> when applying, I would much appreciate.

I'll just do the rename for now.  I'll ponder the cmpxchg a little more,
and if I think we need it I'll send a follow-on patch.

