From: Himanshu Madhani <himanshu.madhani@oracle.com>
To: Shai Malin <smalin@marvell.com>,
	netdev@vger.kernel.org, linux-nvme@lists.infradead.org,
	davem@davemloft.net, kuba@kernel.org, sagi@grimberg.me,
	hch@lst.de, axboe@fb.com, kbusch@kernel.org
Cc: aelior@marvell.com, mkalderon@marvell.com, okulkarni@marvell.com,
	pkushwaha@marvell.com, malin1024@gmail.com,
	Arie Gershberg <agershberg@marvell.com>
Subject: Re: [RFC PATCH v5 05/27] nvme-tcp-offload: Add controller level error recovery implementation
Date: Fri, 21 May 2021 12:42:13 -0500	[thread overview]
Message-ID: <220e96a6-de82-4082-1d4e-2d95dcec5562@oracle.com> (raw)
In-Reply-To: <20210519111340.20613-6-smalin@marvell.com>



On 5/19/21 6:13 AM, Shai Malin wrote:
> From: Arie Gershberg <agershberg@marvell.com>
> 
> In this patch, we implement controller level error handling and recovery.
> Upon an error discovered by the ULP, or a controller reset initiated by
> nvme-core (using the reset_ctrl workqueue), the ULP will initiate controller
> recovery, which includes teardown and re-connect of all queues.
> 
> Acked-by: Igor Russkikh <irusskikh@marvell.com>
> Signed-off-by: Arie Gershberg <agershberg@marvell.com>
> Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
> Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
> Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
> Signed-off-by: Ariel Elior <aelior@marvell.com>
> Signed-off-by: Shai Malin <smalin@marvell.com>
> Reviewed-by: Hannes Reinecke <hare@suse.de>
> ---
>   drivers/nvme/host/tcp-offload.c | 138 +++++++++++++++++++++++++++++++-
>   drivers/nvme/host/tcp-offload.h |   1 +
>   2 files changed, 137 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
> index f7e0dc79bedd..9eb4b03e0f3d 100644
> --- a/drivers/nvme/host/tcp-offload.c
> +++ b/drivers/nvme/host/tcp-offload.c
> @@ -74,6 +74,23 @@ void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev)
>   }
>   EXPORT_SYMBOL_GPL(nvme_tcp_ofld_unregister_dev);
>   
> +/**
> + * nvme_tcp_ofld_error_recovery() - NVMeTCP Offload library error recovery
> + * function.
> + * @nctrl:	NVMe controller instance to change to resetting.
> + *
> + * API function that changes the controller state to resetting.
> + * Part of the overall controller reset sequence.
> + */
> +void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl)
> +{
> +	if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_RESETTING))
> +		return;
> +
> +	queue_work(nvme_reset_wq, &to_tcp_ofld_ctrl(nctrl)->err_work);
> +}
> +EXPORT_SYMBOL_GPL(nvme_tcp_ofld_error_recovery);
> +
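
Looks good. One note for vendor driver authors following along: the HW error
path is expected to funnel into this through the queue's report_err callback
(just below) rather than open-coding state changes. A minimal sketch, assuming
the queue layout from the earlier patches (qedn_handle_conn_error() is a
hypothetical name, not part of this series):

	/* hypothetical vendor-driver fatal-error path */
	static void qedn_handle_conn_error(struct nvme_tcp_ofld_queue *queue)
	{
		/* report_err points at nvme_tcp_ofld_report_queue_err() */
		queue->report_err(queue);
	}

which ends up in nvme_tcp_ofld_error_recovery() and schedules err_work.
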
>   /**
>    * nvme_tcp_ofld_report_queue_err() - NVMeTCP Offload report error event
>    * callback function. Pointed to by nvme_tcp_ofld_queue->report_err.
> @@ -84,7 +101,8 @@ EXPORT_SYMBOL_GPL(nvme_tcp_ofld_unregister_dev);
>    */
>   int nvme_tcp_ofld_report_queue_err(struct nvme_tcp_ofld_queue *queue)
>   {
> -	/* Placeholder - invoke error recovery flow */
> +	pr_err("nvme-tcp-offload queue error\n");
> +	nvme_tcp_ofld_error_recovery(&queue->ctrl->nctrl);
>   
>   	return 0;
>   }
> @@ -296,6 +314,28 @@ nvme_tcp_ofld_configure_io_queues(struct nvme_ctrl *nctrl, bool new)
>   	return rc;
>   }
>   
> +static void nvme_tcp_ofld_reconnect_or_remove(struct nvme_ctrl *nctrl)
> +{
> +	/* If we are resetting/deleting then do nothing */
> +	if (nctrl->state != NVME_CTRL_CONNECTING) {
> +		WARN_ON_ONCE(nctrl->state == NVME_CTRL_NEW ||
> +			     nctrl->state == NVME_CTRL_LIVE);
> +
> +		return;
> +	}
> +
> +	if (nvmf_should_reconnect(nctrl)) {
> +		dev_info(nctrl->device, "Reconnecting in %d seconds...\n",
> +			 nctrl->opts->reconnect_delay);
> +		queue_delayed_work(nvme_wq,
> +				   &to_tcp_ofld_ctrl(nctrl)->connect_work,
> +				   nctrl->opts->reconnect_delay * HZ);
> +	} else {
> +		dev_info(nctrl->device, "Removing controller...\n");
> +		nvme_delete_ctrl(nctrl);
> +	}
> +}
> +
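
For readers without the fabrics code handy, nvmf_should_reconnect() in
drivers/nvme/host/fabrics.c is roughly:

	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;
	return false;

so the controller is only deleted once nr_reconnects reaches max_reconnects
(with -1 meaning retry forever). Same policy as the rdma and tcp transports.
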
>   static int nvme_tcp_ofld_setup_ctrl(struct nvme_ctrl *nctrl, bool new)
>   {
>   	struct nvmf_ctrl_options *opts = nctrl->opts;
> @@ -412,10 +452,68 @@ nvme_tcp_ofld_teardown_io_queues(struct nvme_ctrl *nctrl, bool remove)
>   	/* Placeholder - teardown_io_queues */
>   }
>   
> +static void nvme_tcp_ofld_reconnect_ctrl_work(struct work_struct *work)
> +{
> +	struct nvme_tcp_ofld_ctrl *ctrl =
> +				container_of(to_delayed_work(work),
> +					     struct nvme_tcp_ofld_ctrl,
> +					     connect_work);
> +	struct nvme_ctrl *nctrl = &ctrl->nctrl;
> +
> +	++nctrl->nr_reconnects;
> +
> +	if (ctrl->dev->ops->setup_ctrl(ctrl, false))
> +		goto requeue;
> +
> +	if (nvme_tcp_ofld_setup_ctrl(nctrl, false))
> +		goto release_and_requeue;
> +
> +	dev_info(nctrl->device, "Successfully reconnected (%d attempt)\n",
> +		 nctrl->nr_reconnects);
> +
> +	nctrl->nr_reconnects = 0;
> +
> +	return;
> +
> +release_and_requeue:
> +	ctrl->dev->ops->release_ctrl(ctrl);
> +requeue:
> +	dev_info(nctrl->device, "Failed reconnect attempt %d\n",
> +		 nctrl->nr_reconnects);
> +	nvme_tcp_ofld_reconnect_or_remove(nctrl);
> +}
> +
> +static void nvme_tcp_ofld_error_recovery_work(struct work_struct *work)
> +{
> +	struct nvme_tcp_ofld_ctrl *ctrl =
> +		container_of(work, struct nvme_tcp_ofld_ctrl, err_work);
> +	struct nvme_ctrl *nctrl = &ctrl->nctrl;
> +
> +	nvme_stop_keep_alive(nctrl);
> +	nvme_tcp_ofld_teardown_io_queues(nctrl, false);
> +	/* unquiesce to fail fast pending requests */
> +	nvme_start_queues(nctrl);
> +	nvme_tcp_ofld_teardown_admin_queue(nctrl, false);
> +	blk_mq_unquiesce_queue(nctrl->admin_q);
> +
> +	if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) {
> +		/* state change failure is ok if we started nctrl delete */
> +		WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING &&
> +			     nctrl->state != NVME_CTRL_DELETING_NOIO);
> +
> +		return;
> +	}
> +
> +	nvme_tcp_ofld_reconnect_or_remove(nctrl);
> +}
> +
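
This mirrors nvme_tcp_error_recovery_work() in the non-offload tcp host
driver, and the resulting state machine is the familiar one:

	RESETTING --(err_work)--> CONNECTING --(reconnect ok)--> LIVE
	                              |
	                              +--(retries exhausted)--> DELETING

Keeping this flow in sync with tcp.c seems like the right call.
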
>   static void
>   nvme_tcp_ofld_teardown_ctrl(struct nvme_ctrl *nctrl, bool shutdown)
>   {
> -	/* Placeholder - err_work and connect_work */
> +	struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl);
> +
> +	cancel_work_sync(&ctrl->err_work);
> +	cancel_delayed_work_sync(&ctrl->connect_work);
>   	nvme_tcp_ofld_teardown_io_queues(nctrl, shutdown);
>   	blk_mq_quiesce_queue(nctrl->admin_q);
>   	if (shutdown)
> @@ -430,6 +528,38 @@ static void nvme_tcp_ofld_delete_ctrl(struct nvme_ctrl *nctrl)
>   	nvme_tcp_ofld_teardown_ctrl(nctrl, true);
>   }
>   
> +static void nvme_tcp_ofld_reset_ctrl_work(struct work_struct *work)
> +{
> +	struct nvme_ctrl *nctrl =
> +		container_of(work, struct nvme_ctrl, reset_work);
> +	struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl);
> +
> +	nvme_stop_ctrl(nctrl);
> +	nvme_tcp_ofld_teardown_ctrl(nctrl, false);
> +
> +	if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) {
> +		/* state change failure is ok if we started ctrl delete */
> +		WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING &&
> +			     nctrl->state != NVME_CTRL_DELETING_NOIO);
> +
> +		return;
> +	}
> +
> +	if (ctrl->dev->ops->setup_ctrl(ctrl, false))
> +		goto out_fail;
> +
> +	if (nvme_tcp_ofld_setup_ctrl(nctrl, false))
> +		goto release_ctrl;
> +
> +	return;
> +
> +release_ctrl:
> +	ctrl->dev->ops->release_ctrl(ctrl);
> +out_fail:
> +	++nctrl->nr_reconnects;
> +	nvme_tcp_ofld_reconnect_or_remove(nctrl);
> +}
> +
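
For context on how this work gets scheduled: nvme core's nvme_reset_ctrl()
(triggered, e.g., by a write to the reset_controller sysfs attribute) is
roughly:

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;

so hooking nctrl->reset_work up to this handler in create_ctrl (further down
in this patch) is all that is needed for core-initiated resets.
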
>   static int
>   nvme_tcp_ofld_init_request(struct blk_mq_tag_set *set,
>   			   struct request *rq,
> @@ -526,6 +656,10 @@ nvme_tcp_ofld_create_ctrl(struct device *ndev, struct nvmf_ctrl_options *opts)
>   			     opts->nr_poll_queues + 1;
>   	nctrl->sqsize = opts->queue_size - 1;
>   	nctrl->kato = opts->kato;
> +	INIT_DELAYED_WORK(&ctrl->connect_work,
> +			  nvme_tcp_ofld_reconnect_ctrl_work);
> +	INIT_WORK(&ctrl->err_work, nvme_tcp_ofld_error_recovery_work);
> +	INIT_WORK(&nctrl->reset_work, nvme_tcp_ofld_reset_ctrl_work);
>   	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
>   		opts->trsvcid =
>   			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
> diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
> index 949132ce2ed4..2a931d05905d 100644
> --- a/drivers/nvme/host/tcp-offload.h
> +++ b/drivers/nvme/host/tcp-offload.h
> @@ -210,3 +210,4 @@ struct nvme_tcp_ofld_ops {
>   /* Exported functions for lower vendor specific offload drivers */
>   int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev);
>   void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev);
> +void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl);
> 

Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>

-- 
Himanshu Madhani                                Oracle Linux Engineering


Thread overview: 106+ messages

2021-05-19 11:13 [RFC PATCH v5 00/27] NVMeTCP Offload ULP and QEDN Device Driver Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 01/27] nvme-tcp-offload: Add nvme-tcp-offload - NVMeTCP HW offload ULP Shai Malin
2021-05-21 17:06   ` Himanshu Madhani
2021-05-24 20:11     ` Shai Malin
2021-05-21 22:13   ` Sagi Grimberg
2021-05-24 20:08     ` Shai Malin
2021-06-08  9:28   ` Petr Mladek
2021-05-19 11:13 ` [RFC PATCH v5 02/27] nvme-fabrics: Move NVMF_ALLOWED_OPTS and NVMF_REQUIRED_OPTS definitions Shai Malin
2021-05-21 17:08   ` Himanshu Madhani
2021-05-21 22:15   ` Sagi Grimberg
2021-05-19 11:13 ` [RFC PATCH v5 03/27] nvme-tcp-offload: Add device scan implementation Shai Malin
2021-05-21 17:22   ` Himanshu Madhani
2021-05-21 22:22   ` Sagi Grimberg
2021-05-24 20:14     ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 04/27] nvme-tcp-offload: Add controller level implementation Shai Malin
2021-05-21 17:19   ` Himanshu Madhani
2021-05-21 22:31   ` Sagi Grimberg
2021-05-27 20:03     ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 05/27] nvme-tcp-offload: Add controller level error recovery implementation Shai Malin
2021-05-21 17:42   ` Himanshu Madhani [this message]
2021-05-21 22:34   ` Sagi Grimberg
2021-05-19 11:13 ` [RFC PATCH v5 06/27] nvme-tcp-offload: Add queue level implementation Shai Malin
2021-05-21 18:18   ` Himanshu Madhani
2021-05-21 22:48   ` Sagi Grimberg
2021-05-24 20:16     ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 07/27] nvme-tcp-offload: Add IO level implementation Shai Malin
2021-05-21 18:26   ` Himanshu Madhani
2021-05-19 11:13 ` [RFC PATCH v5 08/27] nvme-tcp-offload: Add Timeout and ASYNC Support Shai Malin
2021-05-21 18:36   ` Himanshu Madhani
2021-05-21 22:51   ` Sagi Grimberg
2021-05-24 20:17     ` Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 09/27] qed: Add TCP_ULP FW resource layout Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 10/27] qed: Add NVMeTCP Offload PF Level FW and HW HSI Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 11/27] qed: Add NVMeTCP Offload Connection Level FW and HW HSI Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 12/27] qed: Add support of HW filter block Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 13/27] qed: Add NVMeTCP Offload IO Level FW and HW HSI Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 14/27] qed: Add NVMeTCP Offload IO Level FW Initializations Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 15/27] qed: Add IP services APIs support Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 16/27] qedn: Add qedn - Marvell's NVMeTCP HW offload vendor driver Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 17/27] qedn: Add qedn probe Shai Malin
2021-05-19 12:31   ` Leon Romanovsky
2021-05-19 14:29     ` Shai Malin
2021-05-19 15:31       ` Leon Romanovsky
2021-05-19 11:13 ` [RFC PATCH v5 18/27] qedn: Add qedn_claim_dev API support Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 19/27] qedn: Add IRQ and fast-path resources initializations Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 20/27] qedn: Add connection-level slowpath functionality Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 21/27] qedn: Add support of configuring HW filter block Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 22/27] qedn: Add IO level qedn_send_req and fw_cq workqueue Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 23/27] qedn: Add support of Task and SGL Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 24/27] qedn: Add support of NVME ICReq & ICResp Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 25/27] qedn: Add IO level fastpath functionality Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 26/27] qedn: Add Connection and IO level recovery flows Shai Malin
2021-05-19 11:13 ` [RFC PATCH v5 27/27] qedn: Add support of ASYNC Shai Malin
