All of lore.kernel.org
 help / color / mirror / Atom feed
* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
@ 2018-08-20 22:06 James Smart
  2018-09-05 20:14 ` James Smart
  2018-09-11  8:58 ` Christoph Hellwig
  0 siblings, 2 replies; 6+ messages in thread
From: James Smart @ 2018-08-20 22:06 UTC (permalink / raw)


The fc transport device should allow for a rediscovery, as userspace
might have lost the events. Example is udev events not handled during
system startup.

This patch adds a sysfs entry 'nvme_discovery' on the fc class to
have it replay all udev discovery events for all local port/remote
port address pairs.

Signed-off-by: James Smart <james.smart at broadcom.com>
Reviewed-by: Hannes Reinecke <hare at suse.com>
Cc: Ewan Milne <emilne at redhat.com>
Cc: Johannes Thumshirn <jthumshirn at suse.com>

---
reposting after its last post in June
---
 drivers/nvme/host/fc.c | 114 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 105 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 611e70cae754..286629d1d2b8 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -122,6 +122,7 @@ struct nvme_fc_rport {
 	struct list_head		endp_list; /* for lport->endp_list */
 	struct list_head		ctrl_list;
 	struct list_head		ls_req_list;
+	struct list_head		disc_list;
 	struct device			*dev;	/* physical device for dma */
 	struct nvme_fc_lport		*lport;
 	spinlock_t			lock;
@@ -210,7 +211,6 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
  */
-static struct class *fc_class;
 static struct device *fc_udev_device;
 
 
@@ -507,6 +507,7 @@ nvme_fc_free_rport(struct kref *ref)
 	list_del(&rport->endp_list);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
 
+	WARN_ON(!list_empty(&rport->disc_list));
 	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
 
 	kfree(rport);
@@ -694,6 +695,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
 	INIT_LIST_HEAD(&newrec->endp_list);
 	INIT_LIST_HEAD(&newrec->ctrl_list);
 	INIT_LIST_HEAD(&newrec->ls_req_list);
+	INIT_LIST_HEAD(&newrec->disc_list);
 	kref_init(&newrec->ref);
 	atomic_set(&newrec->act_ctrl_cnt, 0);
 	spin_lock_init(&newrec->lock);
@@ -3254,6 +3256,100 @@ static struct nvmf_transport_ops nvme_fc_transport = {
 	.create_ctrl	= nvme_fc_create_ctrl,
 };
 
+static void nvme_fc_discovery_unwind(struct list_head *lheadp)
+{
+	unsigned long flags;
+	struct nvme_fc_lport *lport;
+	struct nvme_fc_rport *rport, *tmp_rport;
+
+	list_for_each_entry_safe(rport, tmp_rport,
+				 lheadp, disc_list) {
+		spin_lock_irqsave(&nvme_fc_lock, flags);
+		list_del_init(&rport->disc_list);
+		spin_unlock_irqrestore(&nvme_fc_lock, flags);
+		lport = rport->lport;
+		/* signal discovery. Won't hurt if it repeats */
+		nvme_fc_signal_discovery_scan(lport, rport);
+		nvme_fc_rport_put(rport);
+		nvme_fc_lport_put(lport);
+	}
+}
+
+/* Arbitrary successive failures max. With lots of subsystems could be high */
+#define DISCOVERY_MAX_FAIL	20
+
+static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned long flags;
+	LIST_HEAD(nvme_fc_disc_list);
+	struct nvme_fc_lport *lport;
+	struct nvme_fc_rport *rport, *tmp_rport;
+	int failcnt = 0;
+
+restart:
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+		list_for_each_entry(rport, &lport->endp_list, endp_list) {
+			if (!nvme_fc_lport_get(lport))
+				continue;
+			if (!nvme_fc_rport_get(rport)) {
+				/*
+				 * This is a temporary condition, so upon
+				 * restart this node will be gone from the
+				 * list.
+				 */
+				spin_unlock_irqrestore(&nvme_fc_lock, flags);
+				nvme_fc_lport_put(lport);
+				nvme_fc_discovery_unwind(&nvme_fc_disc_list);
+				if (failcnt++ < DISCOVERY_MAX_FAIL)
+					goto restart;
+				pr_err("nvme_discovery: too many reference "
+				       "failures\n");
+				return 0;
+			}
+			if (list_empty(&rport->disc_list))
+				list_add_tail(&rport->disc_list,
+					      &nvme_fc_disc_list);
+		}
+	}
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	list_for_each_entry_safe(rport, tmp_rport,
+				 &nvme_fc_disc_list, disc_list) {
+		spin_lock_irqsave(&nvme_fc_lock, flags);
+		list_del_init(&rport->disc_list);
+		spin_unlock_irqrestore(&nvme_fc_lock, flags);
+		lport = rport->lport;
+		nvme_fc_signal_discovery_scan(lport, rport);
+		nvme_fc_rport_put(rport);
+		nvme_fc_lport_put(lport);
+	}
+	return count;
+}
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+
+static struct attribute *nvme_fc_attrs[] = {
+	&dev_attr_nvme_discovery.attr,
+	NULL
+};
+
+static struct attribute_group nvme_fc_attr_group = {
+	.attrs = nvme_fc_attrs,
+};
+
+static const struct attribute_group *nvme_fc_attr_groups[] = {
+	&nvme_fc_attr_group,
+	NULL
+};
+
+static struct class fc_class = {
+	.name = "fc",
+	.dev_groups = nvme_fc_attr_groups,
+	.owner = THIS_MODULE,
+};
+
 static int __init nvme_fc_init_module(void)
 {
 	int ret;
@@ -3272,16 +3368,16 @@ static int __init nvme_fc_init_module(void)
 	 * put in place, this code will move to a more generic
 	 * location for the class.
 	 */
-	fc_class = class_create(THIS_MODULE, "fc");
-	if (IS_ERR(fc_class)) {
+	ret = class_register(&fc_class);
+	if (ret) {
 		pr_err("couldn't register class fc\n");
-		return PTR_ERR(fc_class);
+		return ret;
 	}
 
 	/*
 	 * Create a device for the FC-centric udev events
 	 */
-	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
 				"fc_udev_device");
 	if (IS_ERR(fc_udev_device)) {
 		pr_err("couldn't create fc_udev device!\n");
@@ -3296,9 +3392,9 @@ static int __init nvme_fc_init_module(void)
 	return 0;
 
 out_destroy_device:
-	device_destroy(fc_class, MKDEV(0, 0));
+	device_destroy(&fc_class, MKDEV(0, 0));
 out_destroy_class:
-	class_destroy(fc_class);
+	class_unregister(&fc_class);
 	return ret;
 }
 
@@ -3313,8 +3409,8 @@ static void __exit nvme_fc_exit_module(void)
 	ida_destroy(&nvme_fc_local_port_cnt);
 	ida_destroy(&nvme_fc_ctrl_cnt);
 
-	device_destroy(fc_class, MKDEV(0, 0));
-	class_destroy(fc_class);
+	device_destroy(&fc_class, MKDEV(0, 0));
+	class_unregister(&fc_class);
 }
 
 module_init(nvme_fc_init_module);
-- 
2.13.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  2018-08-20 22:06 [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device James Smart
@ 2018-09-05 20:14 ` James Smart
  2018-09-11  8:58 ` Christoph Hellwig
  1 sibling, 0 replies; 6+ messages in thread
From: James Smart @ 2018-09-05 20:14 UTC (permalink / raw)


what's the status on this ?

On 8/20/2018 3:06 PM, James Smart wrote:
> The fc transport device should allow for a rediscovery, as userspace
> might have lost the events. Example is udev events not handled during
> system startup.
>
> This patch adds a sysfs entry 'nvme_discovery' on the fc class to
> have it replay all udev discovery events for all local port/remote
> port address pairs.
>
> Signed-off-by: James Smart <james.smart at broadcom.com>
> Reviewed-by: Hannes Reinecke <hare at suse.com>
> Cc: Ewan Milne <emilne at redhat.com>
> Cc: Johannes Thumshirn <jthumshirn at suse.com>
>
> ---
> reposting after its last post in June
> ---
>   drivers/nvme/host/fc.c | 114 +++++++++++++++++++++++++++++++++++++++++++++----
>   1 file changed, 105 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index 611e70cae754..286629d1d2b8 100644
> --- a/drivers/nvme/host/fc.c
> +++ b/drivers/nvme/host/fc.c
> @@ -122,6 +122,7 @@ struct nvme_fc_rport {
>   	struct list_head		endp_list; /* for lport->endp_list */
>   	struct list_head		ctrl_list;
>   	struct list_head		ls_req_list;
> +	struct list_head		disc_list;
>   	struct device			*dev;	/* physical device for dma */
>   	struct nvme_fc_lport		*lport;
>   	spinlock_t			lock;
> @@ -210,7 +211,6 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
>    * These items are short-term. They will eventually be moved into
>    * a generic FC class. See comments in module init.
>    */
> -static struct class *fc_class;
>   static struct device *fc_udev_device;
>   
>   
> @@ -507,6 +507,7 @@ nvme_fc_free_rport(struct kref *ref)
>   	list_del(&rport->endp_list);
>   	spin_unlock_irqrestore(&nvme_fc_lock, flags);
>   
> +	WARN_ON(!list_empty(&rport->disc_list));
>   	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
>   
>   	kfree(rport);
> @@ -694,6 +695,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
>   	INIT_LIST_HEAD(&newrec->endp_list);
>   	INIT_LIST_HEAD(&newrec->ctrl_list);
>   	INIT_LIST_HEAD(&newrec->ls_req_list);
> +	INIT_LIST_HEAD(&newrec->disc_list);
>   	kref_init(&newrec->ref);
>   	atomic_set(&newrec->act_ctrl_cnt, 0);
>   	spin_lock_init(&newrec->lock);
> @@ -3254,6 +3256,100 @@ static struct nvmf_transport_ops nvme_fc_transport = {
>   	.create_ctrl	= nvme_fc_create_ctrl,
>   };
>   
> +static void nvme_fc_discovery_unwind(struct list_head *lheadp)
> +{
> +	unsigned long flags;
> +	struct nvme_fc_lport *lport;
> +	struct nvme_fc_rport *rport, *tmp_rport;
> +
> +	list_for_each_entry_safe(rport, tmp_rport,
> +				 lheadp, disc_list) {
> +		spin_lock_irqsave(&nvme_fc_lock, flags);
> +		list_del_init(&rport->disc_list);
> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +		lport = rport->lport;
> +		/* signal discovery. Won't hurt if it repeats */
> +		nvme_fc_signal_discovery_scan(lport, rport);
> +		nvme_fc_rport_put(rport);
> +		nvme_fc_lport_put(lport);
> +	}
> +}
> +
> +/* Arbitrary successive failures max. With lots of subsystems could be high */
> +#define DISCOVERY_MAX_FAIL	20
> +
> +static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
> +				       struct device_attribute *attr,
> +				       const char *buf, size_t count)
> +{
> +	unsigned long flags;
> +	LIST_HEAD(nvme_fc_disc_list);
> +	struct nvme_fc_lport *lport;
> +	struct nvme_fc_rport *rport, *tmp_rport;
> +	int failcnt = 0;
> +
> +restart:
> +	spin_lock_irqsave(&nvme_fc_lock, flags);
> +	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
> +		list_for_each_entry(rport, &lport->endp_list, endp_list) {
> +			if (!nvme_fc_lport_get(lport))
> +				continue;
> +			if (!nvme_fc_rport_get(rport)) {
> +				/*
> +				 * This is a temporary condition, so upon
> +				 * restart this node will be gone from the
> +				 * list.
> +				 */
> +				spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +				nvme_fc_lport_put(lport);
> +				nvme_fc_discovery_unwind(&nvme_fc_disc_list);
> +				if (failcnt++ < DISCOVERY_MAX_FAIL)
> +					goto restart;
> +				pr_err("nvme_discovery: too many reference "
> +				       "failures\n");
> +				return 0;
> +			}
> +			if (list_empty(&rport->disc_list))
> +				list_add_tail(&rport->disc_list,
> +					      &nvme_fc_disc_list);
> +		}
> +	}
> +	spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +
> +	list_for_each_entry_safe(rport, tmp_rport,
> +				 &nvme_fc_disc_list, disc_list) {
> +		spin_lock_irqsave(&nvme_fc_lock, flags);
> +		list_del_init(&rport->disc_list);
> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +		lport = rport->lport;
> +		nvme_fc_signal_discovery_scan(lport, rport);
> +		nvme_fc_rport_put(rport);
> +		nvme_fc_lport_put(lport);
> +	}
> +	return count;
> +}
> +static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
> +
> +static struct attribute *nvme_fc_attrs[] = {
> +	&dev_attr_nvme_discovery.attr,
> +	NULL
> +};
> +
> +static struct attribute_group nvme_fc_attr_group = {
> +	.attrs = nvme_fc_attrs,
> +};
> +
> +static const struct attribute_group *nvme_fc_attr_groups[] = {
> +	&nvme_fc_attr_group,
> +	NULL
> +};
> +
> +static struct class fc_class = {
> +	.name = "fc",
> +	.dev_groups = nvme_fc_attr_groups,
> +	.owner = THIS_MODULE,
> +};
> +
>   static int __init nvme_fc_init_module(void)
>   {
>   	int ret;
> @@ -3272,16 +3368,16 @@ static int __init nvme_fc_init_module(void)
>   	 * put in place, this code will move to a more generic
>   	 * location for the class.
>   	 */
> -	fc_class = class_create(THIS_MODULE, "fc");
> -	if (IS_ERR(fc_class)) {
> +	ret = class_register(&fc_class);
> +	if (ret) {
>   		pr_err("couldn't register class fc\n");
> -		return PTR_ERR(fc_class);
> +		return ret;
>   	}
>   
>   	/*
>   	 * Create a device for the FC-centric udev events
>   	 */
> -	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
> +	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
>   				"fc_udev_device");
>   	if (IS_ERR(fc_udev_device)) {
>   		pr_err("couldn't create fc_udev device!\n");
> @@ -3296,9 +3392,9 @@ static int __init nvme_fc_init_module(void)
>   	return 0;
>   
>   out_destroy_device:
> -	device_destroy(fc_class, MKDEV(0, 0));
> +	device_destroy(&fc_class, MKDEV(0, 0));
>   out_destroy_class:
> -	class_destroy(fc_class);
> +	class_unregister(&fc_class);
>   	return ret;
>   }
>   
> @@ -3313,8 +3409,8 @@ static void __exit nvme_fc_exit_module(void)
>   	ida_destroy(&nvme_fc_local_port_cnt);
>   	ida_destroy(&nvme_fc_ctrl_cnt);
>   
> -	device_destroy(fc_class, MKDEV(0, 0));
> -	class_destroy(fc_class);
> +	device_destroy(&fc_class, MKDEV(0, 0));
> +	class_unregister(&fc_class);
>   }
>   
>   module_init(nvme_fc_init_module);

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  2018-08-20 22:06 [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device James Smart
  2018-09-05 20:14 ` James Smart
@ 2018-09-11  8:58 ` Christoph Hellwig
  2018-09-11 18:02   ` James Smart
  1 sibling, 1 reply; 6+ messages in thread
From: Christoph Hellwig @ 2018-09-11  8:58 UTC (permalink / raw)


> +	unsigned long flags;
> +	struct nvme_fc_lport *lport;
> +	struct nvme_fc_rport *rport, *tmp_rport;
> +
> +	list_for_each_entry_safe(rport, tmp_rport,
> +				 lheadp, disc_list) {
> +		spin_lock_irqsave(&nvme_fc_lock, flags);
> +		list_del_init(&rport->disc_list);
> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +		lport = rport->lport;
> +		/* signal discovery. Won't hurt if it repeats */
> +		nvme_fc_signal_discovery_scan(lport, rport);
> +		nvme_fc_rport_put(rport);
> +		nvme_fc_lport_put(lport);
> +	}

This list iteration is not safe.  It should probably be something like:

	spin_lock_irqsave(&nvme_fc_lock, flags);
	while (!list_empty(disc_list)) {
		struct nvme_fc_rport *rport = list_entry(disc_list->next,
				struct nvme_fc_rport, disc_list);

		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);


> +			if (!nvme_fc_lport_get(lport))
> +				continue;
> +			if (!nvme_fc_rport_get(rport)) {
> +				/*
> +				 * This is a temporary condition, so upon
> +				 * restart this node will be gone from the
> +				 * list.
> +				 */
> +				spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +				nvme_fc_lport_put(lport);
> +				nvme_fc_discovery_unwind(&nvme_fc_disc_list);
> +				if (failcnt++ < DISCOVERY_MAX_FAIL)
> +					goto restart;
> +				pr_err("nvme_discovery: too many reference "
> +				       "failures\n");
> +				return 0;
> +			}

Maybe use a goto for this condition to move it out of the loop?

> +	list_for_each_entry_safe(rport, tmp_rport,
> +				 &nvme_fc_disc_list, disc_list) {
> +		spin_lock_irqsave(&nvme_fc_lock, flags);
> +		list_del_init(&rport->disc_list);
> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
> +		lport = rport->lport;
> +		nvme_fc_signal_discovery_scan(lport, rport);
> +		nvme_fc_rport_put(rport);
> +		nvme_fc_lport_put(lport);
> +	}

Same locking issue as above.  And in fact exactly the same code, so
it should probably call nvme_fc_discovery_unwind.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  2018-09-11  8:58 ` Christoph Hellwig
@ 2018-09-11 18:02   ` James Smart
  2018-09-17 13:41     ` Christoph Hellwig
  0 siblings, 1 reply; 6+ messages in thread
From: James Smart @ 2018-09-11 18:02 UTC (permalink / raw)


On 9/11/2018 1:58 AM, Christoph Hellwig wrote:
>> +	unsigned long flags;
>> +	struct nvme_fc_lport *lport;
>> +	struct nvme_fc_rport *rport, *tmp_rport;
>> +
>> +	list_for_each_entry_safe(rport, tmp_rport,
>> +				 lheadp, disc_list) {
>> +		spin_lock_irqsave(&nvme_fc_lock, flags);
>> +		list_del_init(&rport->disc_list);
>> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
>> +		lport = rport->lport;
>> +		/* signal discovery. Won't hurt if it repeats */
>> +		nvme_fc_signal_discovery_scan(lport, rport);
>> +		nvme_fc_rport_put(rport);
>> +		nvme_fc_lport_put(lport);
>> +	}
> 
> This list iteration is not safe.  It should probably be something like:
> 
> 	spin_lock_irqsave(&nvme_fc_lock, flags);
> 	while (!list_empty(disc_list)) {
> 	struct nvme_fc_rport *rport = list_entry(disc_list->next,
> 			struct nvme_fc_rport, disc_list);
> 
> 		list_del_init(&rport->disc_list);
> 		spin_unlock_irqrestore(&nvme_fc_lock, flags);
> 
> 		lport = rport->lport;
> 		/* signal discovery. Won't hurt if it repeats */
> 		nvme_fc_signal_discovery_scan(lport, rport);
> 		nvme_fc_rport_put(rport);
> 		nvme_fc_lport_put(lport);
> 
> 		spin_lock_irqsave(&nvme_fc_lock, flags);
> 	}
> 	spin_unlock_irqrestore(&nvme_fc_lock, flags);

Well, I don't think I agree with you on the safeness of the locking, but 
moving it to the other style isn't different and looks more normal. I'll 
rework it.


> 
> 
>> +			if (!nvme_fc_lport_get(lport))
>> +				continue;
>> +			if (!nvme_fc_rport_get(rport)) {
>> +				/*
>> +				 * This is a temporary condition, so upon
>> +				 * restart this node will be gone from the
>> +				 * list.
>> +				 */
>> +				spin_unlock_irqrestore(&nvme_fc_lock, flags);
>> +				nvme_fc_lport_put(lport);
>> +				nvme_fc_discovery_unwind(&nvme_fc_disc_list);
>> +				if (failcnt++ < DISCOVERY_MAX_FAIL)
>> +					goto restart;
>> +				pr_err("nvme_discovery: too many reference "
>> +				       "failures\n");
>> +				return 0;
>> +			}
> 
> Maybe use a goto for this condition to move it out of the loop?

May not be necessary. I think I want to change this loop a little.

> 
>> +	list_for_each_entry_safe(rport, tmp_rport,
>> +				 &nvme_fc_disc_list, disc_list) {
>> +		spin_lock_irqsave(&nvme_fc_lock, flags);
>> +		list_del_init(&rport->disc_list);
>> +		spin_unlock_irqrestore(&nvme_fc_lock, flags);
>> +		lport = rport->lport;
>> +		nvme_fc_signal_discovery_scan(lport, rport);
>> +		nvme_fc_rport_put(rport);
>> +		nvme_fc_lport_put(lport);
>> +	}
> 
> Same locking issue as above.  And in fact exactly the same code, so
> it should probably call nvme_fc_discovery_unwind.

ok

-- james

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  2018-09-11 18:02   ` James Smart
@ 2018-09-17 13:41     ` Christoph Hellwig
  2018-09-17 16:07       ` James Smart
  0 siblings, 1 reply; 6+ messages in thread
From: Christoph Hellwig @ 2018-09-17 13:41 UTC (permalink / raw)


On Tue, Sep 11, 2018 at 11:02:13AM -0700, James Smart wrote:
> Well, I don't think I agree with you on the safeness of the locking, but
> moving it to the other style isn't different and looks more normal. I'll
> rework it.

You are iterating the list without the lock and only take the lock
to remove the entry - this is racy vs another thread adding to or
removing from the list.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device
  2018-09-17 13:41     ` Christoph Hellwig
@ 2018-09-17 16:07       ` James Smart
  0 siblings, 0 replies; 6+ messages in thread
From: James Smart @ 2018-09-17 16:07 UTC (permalink / raw)




On 9/17/2018 6:41 AM, Christoph Hellwig wrote:
> On Tue, Sep 11, 2018 at 11:02:13AM -0700, James Smart wrote:
>> Well, I don't think I agree with you on the safeness of the locking, but
>> moving it to the other style isn't different and looks more normal. I'll
>> rework it.
> You are iterating the list without the lock and only take the lock
> to remove the entry - this is racy vs another thread adding to or
> removing from the list.

:) By definition, there is only one thread processing a list. Lock was
only to be coherent on the entry being on a list or not vs another thread.

But we're good. The rework was a good idea. Thanks.

-- james

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-09-17 16:07 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-20 22:06 [REPOST][PATCH] nvme_fc: add 'nvme_discovery' sysfs attribute to fc transport device James Smart
2018-09-05 20:14 ` James Smart
2018-09-11  8:58 ` Christoph Hellwig
2018-09-11 18:02   ` James Smart
2018-09-17 13:41     ` Christoph Hellwig
2018-09-17 16:07       ` James Smart

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.