* [PATCH 0/2] nvme-multipath: round-robin I/O policy
@ 2018-11-15 12:29 Hannes Reinecke
  2018-11-15 12:29 ` [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute Hannes Reinecke
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-15 12:29 UTC (permalink / raw)


Hi all,

after my NUMA path balancing patch hasn't met with universal approval,
here's now my take on a 'real' round-robin I/O scheduler for NVMe multipathing.
With this patch I'm able to boost I/O performance from 127k/127k randrw
to 156k/156k randrw (as measured by fio).
Testbed was two dual 32G FC HBAs connected back-to-back with nvmet
against a 1G zram-backed namespace.

As usual, comments and reviews are welcome.

Hannes Reinecke (2):
  nvme-multipath: add 'iopolicy' subsystem attribute
  nvme-multipath: round-robin I/O policy

 drivers/nvme/host/core.c      |  6 +++
 drivers/nvme/host/multipath.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/nvme/host/nvme.h      |  8 ++++
 3 files changed, 101 insertions(+), 1 deletion(-)

-- 
2.16.4


* [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute
  2018-11-15 12:29 [PATCH 0/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
@ 2018-11-15 12:29 ` Hannes Reinecke
  2018-11-15 17:35   ` Sagi Grimberg
  2018-11-15 12:29 ` [PATCH 2/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-15 12:29 UTC (permalink / raw)


Add a sysfs attribute 'iopolicy' to the subsystem to allow for
distinct I/O policies.

Signed-off-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/core.c      |  6 ++++++
 drivers/nvme/host/multipath.c | 42 +++++++++++++++++++++++++++++++++++++++++-
 drivers/nvme/host/nvme.h      |  7 +++++++
 3 files changed, 54 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8e1c6ddf0368..f4f5b4991f1e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2176,6 +2176,9 @@ static struct attribute *nvme_subsys_attrs[] = {
 	&subsys_attr_serial.attr,
 	&subsys_attr_firmware_rev.attr,
 	&subsys_attr_subsysnqn.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+	&subsys_attr_iopolicy.attr,
+#endif
 	NULL,
 };
 
@@ -2228,6 +2231,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
 	subsys->vendor_id = le16_to_cpu(id->vid);
 	subsys->cmic = id->cmic;
+#ifdef CONFIG_NVME_MULTIPATH
+	subsys->iopolicy = NVME_IOPOLICY_NUMA;
+#endif
 
 	subsys->dev.class = nvme_subsys_class;
 	subsys->dev.release = nvme_release_subsystem;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 8e03cda770c5..1f97293380e2 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -141,7 +141,10 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
 			continue;
 
-		distance = node_distance(node, ns->ctrl->numa_node);
+		if (head->subsys->iopolicy == NVME_IOPOLICY_NUMA)
+			distance = node_distance(node, ns->ctrl->numa_node);
+		else
+			distance = LOCAL_DISTANCE;
 
 		switch (ns->ana_state) {
 		case NVME_ANA_OPTIMIZED:
@@ -486,6 +489,43 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 	cancel_work_sync(&ctrl->ana_work);
 }
 
+#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
+	struct device_attribute subsys_attr_##_name =	\
+		__ATTR(_name, _mode, _show, _store)
+
+static const char *nvme_iopolicy_names[] = {
+	[NVME_IOPOLICY_UNKNOWN] = "unknown",
+	[NVME_IOPOLICY_NONE] = "none",
+	[NVME_IOPOLICY_NUMA] = "numa",
+};
+
+static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_subsystem *subsys =
+		container_of(dev, struct nvme_subsystem, dev);
+
+	return sprintf(buf, "%s\n", nvme_iopolicy_names[subsys->iopolicy]);
+}
+
+static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int iopolicy = NVME_IOPOLICY_UNKNOWN;
+
+	if (!strncmp(buf, "none", 4))
+		iopolicy = NVME_IOPOLICY_NONE;
+	else if (!strncmp(buf, "numa", 4))
+		iopolicy = NVME_IOPOLICY_NUMA;
+
+	if (iopolicy == NVME_IOPOLICY_UNKNOWN)
+		return -EINVAL;
+
+	return count;
+}
+SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
+		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
+
 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9e1a51fed0c1..c8642ebeb4a3 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -259,6 +259,12 @@ struct nvme_subsystem {
 	u8			cmic;
 	u16			vendor_id;
 	struct ida		ns_ida;
+#ifdef CONFIG_NVME_MULTIPATH
+#define NVME_IOPOLICY_UNKNOWN 0
+#define NVME_IOPOLICY_NONE 1
+#define NVME_IOPOLICY_NUMA 2
+	unsigned int		iopolicy;
+#endif
 };
 
 /*
@@ -489,6 +495,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute subsys_attr_iopolicy;
 
 #else
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
-- 
2.16.4


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
  2018-11-15 12:29 [PATCH 0/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
  2018-11-15 12:29 ` [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute Hannes Reinecke
@ 2018-11-15 12:29 ` Hannes Reinecke
  2018-11-20 16:42   ` Christoph Hellwig
  2018-11-16  8:26 ` [PATCH 0/2] " Christoph Hellwig
  2018-12-05 20:05 ` Ewan D. Milne
  3 siblings, 1 reply; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-15 12:29 UTC (permalink / raw)


Add a simple round-robin I/O policy for multipathing.

Signed-off-by: Hannes Reinecke <hare at suse.com>
---
 drivers/nvme/host/multipath.c | 46 +++++++++++++++++++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h      |  1 +
 2 files changed, 47 insertions(+)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1f97293380e2..fed3eea9da9a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -177,14 +177,52 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 		ns->ana_state == NVME_ANA_OPTIMIZED;
 }
 
+inline struct nvme_ns *__nvme_next_path(struct nvme_ns_head *head, int node,
+					struct nvme_ns *old)
+{
+	struct nvme_ns *ns, *found = NULL;
+
+	do {
+		ns = list_next_or_null_rcu(&head->list, &old->siblings,
+					   struct nvme_ns, siblings);
+		if (!ns) {
+			ns = list_first_or_null_rcu(&head->list, struct nvme_ns,
+						    siblings);
+			if (ns && ns == old)
+				/*
+				 * The list consists of just one entry.
+				 * Sorry for the noise :-)
+				 */
+				return old;
+			else if (!ns)
+				break;
+		}
+		if (nvme_path_is_optimized(ns)) {
+			found = ns;
+			break;
+		}
+	} while (ns != old);
+
+	if (found)
+		rcu_assign_pointer(head->current_path[node], found);
+
+	return found;
+}
+
 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 {
 	int node = numa_node_id();
 	struct nvme_ns *ns;
 
 	ns = srcu_dereference(head->current_path[node], &head->srcu);
+retry:
 	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
 		ns = __nvme_find_path(head, node);
+	else if (head->subsys->iopolicy == NVME_IOPOLICY_RR) {
+		ns = __nvme_next_path(head, node, ns);
+		if (!ns)
+			goto retry;
+	}
 	return ns;
 }
 
@@ -497,6 +535,7 @@ static const char *nvme_iopolicy_names[] = {
 	[NVME_IOPOLICY_UNKNOWN] = "unknown",
 	[NVME_IOPOLICY_NONE] = "none",
 	[NVME_IOPOLICY_NUMA] = "numa",
+	[NVME_IOPOLICY_RR] = "round-robin",
 };
 
 static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
@@ -512,15 +551,22 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	unsigned int iopolicy = NVME_IOPOLICY_UNKNOWN;
+	struct nvme_subsystem *subsys =
+		container_of(dev, struct nvme_subsystem, dev);
 
 	if (!strncmp(buf, "none", 4))
 		iopolicy = NVME_IOPOLICY_NONE;
 	else if (!strncmp(buf, "numa", 4))
 		iopolicy = NVME_IOPOLICY_NUMA;
+	else if (!strncmp(buf, "round-robin", 11))
+		iopolicy = NVME_IOPOLICY_RR;
 
 	if (iopolicy == NVME_IOPOLICY_UNKNOWN)
 		return -EINVAL;
 
+	mutex_lock(&subsys->lock);
+	subsys->iopolicy = iopolicy;
+	mutex_unlock(&subsys->lock);
 	return count;
 }
 SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c8642ebeb4a3..2fb124548c0c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -263,6 +263,7 @@ struct nvme_subsystem {
 #define NVME_IOPOLICY_UNKNOWN 0
 #define NVME_IOPOLICY_NONE 1
 #define NVME_IOPOLICY_NUMA 2
+#define NVME_IOPOLICY_RR 3
 	unsigned int		iopolicy;
 #endif
 };
-- 
2.16.4


* [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute
  2018-11-15 12:29 ` [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute Hannes Reinecke
@ 2018-11-15 17:35   ` Sagi Grimberg
  2018-11-16  8:07     ` Hannes Reinecke
  0 siblings, 1 reply; 15+ messages in thread
From: Sagi Grimberg @ 2018-11-15 17:35 UTC (permalink / raw)



> +static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
> +		struct device_attribute *attr, const char *buf, size_t count)
> +{
> +	unsigned int iopolicy = NVME_IOPOLICY_UNKNOWN;
> +
> +	if (!strncmp(buf, "none", 4))
> +		iopolicy = NVME_IOPOLICY_NONE;
> +	else if (!strncmp(buf, "numa", 4))
> +		iopolicy = NVME_IOPOLICY_NUMA;
> +
> +	if (iopolicy == NVME_IOPOLICY_UNKNOWN)
> +		return -EINVAL;

Missed assignment to subsys->iopolicy?


* [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute
  2018-11-15 17:35   ` Sagi Grimberg
@ 2018-11-16  8:07     ` Hannes Reinecke
  0 siblings, 0 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-16  8:07 UTC (permalink / raw)


On 11/15/18 6:35 PM, Sagi Grimberg wrote:
> 
>> +static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>> +		struct device_attribute *attr, const char *buf, size_t count)
>> +{
>> +	unsigned int iopolicy = NVME_IOPOLICY_UNKNOWN;
>> +
>> +	if (!strncmp(buf, "none", 4))
>> +		iopolicy = NVME_IOPOLICY_NONE;
>> +	else if (!strncmp(buf, "numa", 4))
>> +		iopolicy = NVME_IOPOLICY_NUMA;
>> +
>> +	if (iopolicy == NVME_IOPOLICY_UNKNOWN)
>> +		return -EINVAL;
> 
> Missed assignment to subsys->iopolicy?
> 
Yeah, moved to the next patch.
Will be cleaned up in the next round.
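I.e. the handler just needs the write-back at the end, roughly (patch 2
of this series already carries it, under subsys->lock):

	subsys->iopolicy = iopolicy;
	return count;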

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		               zSeries & Storage
hare at suse.com			               +49 911 74053 688
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: F. Imendörffer, J. Smithard, D. Upmanyu, G. Norton
HRB 21284 (AG Nürnberg)


* [PATCH 0/2] nvme-multipath: round-robin I/O policy
  2018-11-15 12:29 [PATCH 0/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
  2018-11-15 12:29 ` [PATCH 1/2] nvme-multipath: add 'iopolicy' subsystem attribute Hannes Reinecke
  2018-11-15 12:29 ` [PATCH 2/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
@ 2018-11-16  8:26 ` Christoph Hellwig
  2018-11-20 16:02   ` Hannes Reinecke
  2018-12-05 20:05 ` Ewan D. Milne
  3 siblings, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2018-11-16  8:26 UTC (permalink / raw)


On Thu, Nov 15, 2018 at 01:29:25PM +0100, Hannes Reinecke wrote:
> Hi all,
> 
> after my NUMA path balancing patch hasn't met with universal approval,

Were there any good arguments against it vs just my implementation
nitpicks?

> here's now my take on a 'real' round-robin I/O scheduler for NVMe multipathing.
> With this patch I'm able to boost I/O performance from 127k/127k randrw
> to 156k/156k randrw (as measured by fio).

Either way that is some horribly bad performance.  How much can
you get by driving one namespace on each path individually?


* [PATCH 0/2] nvme-multipath: round-robin I/O policy
  2018-11-16  8:26 ` [PATCH 0/2] " Christoph Hellwig
@ 2018-11-20 16:02   ` Hannes Reinecke
  2018-11-20 16:19     ` Christoph Hellwig
  0 siblings, 1 reply; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-20 16:02 UTC (permalink / raw)


On 11/16/18 9:26 AM, Christoph Hellwig wrote:
> On Thu, Nov 15, 2018 at 01:29:25PM +0100, Hannes Reinecke wrote:
>> Hi all,
>>
>> after my NUMA path balancing patch hasn't met with universal approval,
> 
> Were there any good arguments against it vs just my implementation
> nitpicks?
> 
We had gotten reports that I/O only ever went via the first path, 
despite the node map being set correctly.

Plus the distribution algorithm to spread out the paths across the 
individual nodes is far from simple.

Also once we abandon direct NUMA mapping there's no good reason why we 
should be selecting paths based on NUMA node; it might actually be 
better to use the core id here, as then we could map as many paths as we 
have CPUs.

So in the end the round-robin scheduler was just easier to implement, 
and actually provided better performance on my testbed.

>> here's now my take on a 'real' round-robin I/O scheduler for NVMe multipathing.
>> With this patch I'm able to boost I/O performance from 127k/127k randrw
>> to 156k/156k randrw (as measured by fio).
> 
> Either way that is some horribly bad performance.  How much can
> you get by driving one namespace on each path individually?
> 
Well ... might be, but these were the numbers I had been getting without 
tweaking the system. So while it might be possible to get better 
numbers, the question then would be why this hasn't been done automatically.

Seeing that the whole point of this patch is to _avoid_ additional 
tuning ...

Cheers,

Hannes


* [PATCH 0/2] nvme-multipath: round-robin I/O policy
  2018-11-20 16:02   ` Hannes Reinecke
@ 2018-11-20 16:19     ` Christoph Hellwig
  0 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2018-11-20 16:19 UTC (permalink / raw)


On Tue, Nov 20, 2018 at 05:02:14PM +0100, Hannes Reinecke wrote:
> Also once we abandon direct NUMA mapping there's no good reason why we 
> should be selecting paths based on NUMA node; it might actually be better 
> to use the core id here, as then we could map as many paths as we have 
> CPUs.

True.

> So in the end the round-robin scheduler was just easier to implement, and 
> actually provided better performance on my testbed.

Oh well.  I guess we can pick it up as a tribute to FC given that it
isn't all that horrible.  I just hate using a dumb scheme like round
robin when we otherwise spend all our time optimizing every last cycle
and cache line.  I'll do another detailed review and will consider
picking it up as a non-default choice.


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
  2018-11-15 12:29 ` [PATCH 2/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
@ 2018-11-20 16:42   ` Christoph Hellwig
  2018-11-20 20:30     ` Hannes Reinecke
  0 siblings, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2018-11-20 16:42 UTC (permalink / raw)


On Thu, Nov 15, 2018 at 01:29:27PM +0100, Hannes Reinecke wrote:
> Add a simple round-robin I/O policy for multipathing.
> 
> Signed-off-by: Hannes Reinecke <hare at suse.com>
> ---
>  drivers/nvme/host/multipath.c | 46 +++++++++++++++++++++++++++++++++++++++++++
>  drivers/nvme/host/nvme.h      |  1 +
>  2 files changed, 47 insertions(+)
> 
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index 1f97293380e2..fed3eea9da9a 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -177,14 +177,52 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>  		ns->ana_state == NVME_ANA_OPTIMIZED;
>  }
>  
> +inline struct nvme_ns *__nvme_next_path(struct nvme_ns_head *head, int node,
> +					struct nvme_ns *old)

This seems to miss a static.  Also maybe put a rr in somewhere, e.g.:

static inline struct nvme_ns *
nvme_round_robin_path(struct nvme_ns_head *head, int node,
		struct nvme_ns *old)

> +	do {
> +		ns = list_next_or_null_rcu(&head->list, &old->siblings,
> +					   struct nvme_ns, siblings);
> +		if (!ns) {
> +			ns = list_first_or_null_rcu(&head->list, struct nvme_ns,
> +						    siblings);
> +			if (ns && ns == old)
> +				/*
> +				 * The list consists of just one entry.
> +				 * Sorry for the noise :-)
> +				 */
> +				return old;
> +			else if (!ns)

No need for an else after a return.
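I.e. something like (sketch of the equivalent restructuring):

		if (!ns) {
			ns = list_first_or_null_rcu(&head->list,
						    struct nvme_ns, siblings);
			if (!ns)
				break;
			if (ns == old)
				/* The list consists of just one entry. */
				return old;
		}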

> +				break;
> +		}
> +		if (nvme_path_is_optimized(ns)) {
> +			found = ns;
> +			break;
> +		}

So you never consider non-optimized paths here?

>  inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>  {
>  	int node = numa_node_id();
>  	struct nvme_ns *ns;
>  
>  	ns = srcu_dereference(head->current_path[node], &head->srcu);
> +retry:
>  	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
>  		ns = __nvme_find_path(head, node);
> +	else if (head->subsys->iopolicy == NVME_IOPOLICY_RR) {
> +		ns = __nvme_next_path(head, node, ns);
> +		if (!ns)
> +			goto retry;
> +	}

Can you move the __nvme_find_path call for the first path into
your round robin helper so that we can make this whole thing
a little cleaner?  E.g.

	if (head->subsys->iopolicy == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node);
 	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;

> @@ -497,6 +535,7 @@ static const char *nvme_iopolicy_names[] = {
>  	[NVME_IOPOLICY_UNKNOWN] = "unknown",
>  	[NVME_IOPOLICY_NONE] = "none",
>  	[NVME_IOPOLICY_NUMA] = "numa",
> +	[NVME_IOPOLICY_RR] = "round-robin",

I think you want to merge your I/O-policy attribute patch into this
one.  And we should probably kill all entries except for numa and
round-robin.

> +	struct nvme_subsystem *subsys =
> +		container_of(dev, struct nvme_subsystem, dev);
>  
>  	if (!strncmp(buf, "none", 4))
>  		iopolicy = NVME_IOPOLICY_NONE;
>  	else if (!strncmp(buf, "numa", 4))
>  		iopolicy = NVME_IOPOLICY_NUMA;
> +	else if (!strncmp(buf, "round-robin", 11))
> +		iopolicy = NVME_IOPOLICY_RR;

This could just loop over the above array for matches.
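A sketch of that kind of table-driven match (just an illustration, not
part of the posted patch; sysfs_streq() would also cope with the newline
that echo appends):

	unsigned int i;

	for (i = NVME_IOPOLICY_NONE; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			iopolicy = i;
			break;
		}
	}

	if (iopolicy == NVME_IOPOLICY_UNKNOWN)
		return -EINVAL;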

> +	mutex_lock(&subsys->lock);
> +	subsys->iopolicy = iopolicy;
> +	mutex_unlock(&subsys->lock);

Rather pointless to take a lock for updating a single value.
Just use WRITE_ONCE/READ_ONCE.
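A sketch of that (illustrative only): the writer becomes a plain

	/* a single word, no serialization needed */
	WRITE_ONCE(subsys->iopolicy, iopolicy);
	return count;

and the hot path pairs it with

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)

so the compiler can neither tear nor cache the access, without taking
any lock in the submission path.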


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
  2018-11-20 16:42   ` Christoph Hellwig
@ 2018-11-20 20:30     ` Hannes Reinecke
  2018-11-21  8:28       ` Christoph Hellwig
       [not found]       ` <8a583536-151e-6f68-f4f9-98d8c4b853dd@broadcom.com>
  0 siblings, 2 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-20 20:30 UTC (permalink / raw)


On 11/20/18 5:42 PM, Christoph Hellwig wrote:
> On Thu, Nov 15, 2018 at 01:29:27PM +0100, Hannes Reinecke wrote:
>> Add a simple round-robin I/O policy for multipathing.
>>
>> Signed-off-by: Hannes Reinecke <hare at suse.com>
>> ---
>>   drivers/nvme/host/multipath.c | 46 +++++++++++++++++++++++++++++++++++++++++++
>>   drivers/nvme/host/nvme.h      |  1 +
>>   2 files changed, 47 insertions(+)
>>
>> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
>> index 1f97293380e2..fed3eea9da9a 100644
>> --- a/drivers/nvme/host/multipath.c
>> +++ b/drivers/nvme/host/multipath.c
>> @@ -177,14 +177,52 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>>   		ns->ana_state == NVME_ANA_OPTIMIZED;
>>   }
>>   
>> +inline struct nvme_ns *__nvme_next_path(struct nvme_ns_head *head, int node,
>> +					struct nvme_ns *old)
> 
> This seems to miss a static.  Also maybe put a rr in somewhere, e.g.:
> 
> static inline struct nvme_ns *
> nvme_round_robin_path(struct nvme_ns_head *head, int node,
> 		struct nvme_ns *old)
> 
Okay.

>> +	do {
>> +		ns = list_next_or_null_rcu(&head->list, &old->siblings,
>> +					   struct nvme_ns, siblings);
>> +		if (!ns) {
>> +			ns = list_first_or_null_rcu(&head->list, struct nvme_ns,
>> +						    siblings);
>> +			if (ns && ns == old)
>> +				/*
>> +				 * The list consists of just one entry.
>> +				 * Sorry for the noise :-)
>> +				 */
>> +				return old;
>> +			else if (!ns)
> 
> No need for an else after a return.
> 
Yes.

>> +				break;
>> +		}
>> +		if (nvme_path_is_optimized(ns)) {
>> +			found = ns;
>> +			break;
>> +		}
> 
> So you never consider non-optimized paths here?
> 
Yes, correct. Two reasons:
- We try to optimize for performance. Selecting a non-optimized path per 
definition doesn't provide that.
- The selection algorithm becomes overly complex, and will require an 
even more complex mechanism to figure out at which point the use of a 
non-optimized path becomes feasible.

>>   inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>>   {
>>   	int node = numa_node_id();
>>   	struct nvme_ns *ns;
>>   
>>   	ns = srcu_dereference(head->current_path[node], &head->srcu);
>> +retry:
>>   	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
>>   		ns = __nvme_find_path(head, node);
>> +	else if (head->subsys->iopolicy == NVME_IOPOLICY_RR) {
>> +		ns = __nvme_next_path(head, node, ns);
>> +		if (!ns)
>> +			goto retry;
>> +	}
> 
> Can you move the __nvme_find_path call for the first path into
> your round robin helper so that we can make this whole thing
> a little cleaner?  E.g.
> 
> 	if (head->subsys->iopolicy == NVME_IOPOLICY_RR)
> 		return nvme_round_robin_path(head, node);
>   	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
> 		ns = __nvme_find_path(head, node);
> 	return ns;
>
Actually this order was by design.
By ordering it _after_ __nvme_find_path() we guarantee that the first 
round found an optimal path, and so we can continue selecting the next 
optimal path.
So this ordering would break the assumptions I made when implementing it.
But I'll have a look to see if that can't be lifted.

>> @@ -497,6 +535,7 @@ static const char *nvme_iopolicy_names[] = {
>>   	[NVME_IOPOLICY_UNKNOWN] = "unknown",
>>   	[NVME_IOPOLICY_NONE] = "none",
>>   	[NVME_IOPOLICY_NUMA] = "numa",
>> +	[NVME_IOPOLICY_RR] = "round-robin",
> 
> I think you want to merge your I/O-policy attribute patch into this
> one.  And we should probably kill all entries except for numa and
> round-robin.
> 
Sure, can do.

>> +	struct nvme_subsystem *subsys =
>> +		container_of(dev, struct nvme_subsystem, dev);
>>   
>>   	if (!strncmp(buf, "none", 4))
>>   		iopolicy = NVME_IOPOLICY_NONE;
>>   	else if (!strncmp(buf, "numa", 4))
>>   		iopolicy = NVME_IOPOLICY_NUMA;
>> +	else if (!strncmp(buf, "round-robin", 11))
>> +		iopolicy = NVME_IOPOLICY_RR;
> 
> This could just loop over the above array for matches.
> 
Yeah, was too lazy. Will be doing it.

>> +	mutex_lock(&subsys->lock);
>> +	subsys->iopolicy = iopolicy;
>> +	mutex_unlock(&subsys->lock);
> 
> Rather pointless to take a lock for updating a single value.
> Just use WRITE_ONCE/READ_ONCE.
> 
Indeed. That was just the 'obviously correct' solution.

Will be reposting the patch.

Thanks for the review.

Cheers,

Hannes


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
  2018-11-20 20:30     ` Hannes Reinecke
@ 2018-11-21  8:28       ` Christoph Hellwig
  2018-11-21 11:24         ` Hannes Reinecke
       [not found]       ` <8a583536-151e-6f68-f4f9-98d8c4b853dd@broadcom.com>
  1 sibling, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2018-11-21  8:28 UTC (permalink / raw)


On Tue, Nov 20, 2018 at 09:30:32PM +0100, Hannes Reinecke wrote:
>> So you never consider non-optimized paths here?
>>
> Yes, correct. Two reasons:
> - We try to optimize for performance. Selecting a non-optimized path per 
> definition doesn't provide that.
> - The selection algorithm becomes overly complex, and will require an even 
> more complex mechanism to figure out at which point the use of a 
> non-optimized path becomes feasible.

We can still have a system where we don't see any optimized path at all,
so you will need some kind of fallback.

>> Can you move the __nvme_find_path call for the first path into
>> your round robing helper so that we can make this whole thing
>> a little cleaner?  E.g.
>>
>> 	if (head->subsys->iopolicy == NVME_IOPOLICY_RR)
>> 		return nvme_round_robin_path(head, node);
>>   	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
>> 		ns = __nvme_find_path(head, node);
>> 	return ns;
> Actually this order was by design.
> By ordering it _after_ __nvme_find_path() we guarantee that the first round 
> found an optimal path, and so we can continue selecting the next optimal 
> path.
> So this ordering would break the assumptions I made when implementing it.
> But I'll have a look to see if that can't be lifted.

You can keep the __nvme_find_path for round robin, I just want it moved
into the round robin helper, so that the main function stays nice and tidy.
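Something like this, as a rough sketch (illustrative only, keeping the
NULL-return fallback semantics of the posted patch):

static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns;

	/*
	 * No usable cached path yet: seed the rotation with the
	 * distance-based lookup.
	 */
	if (!old || !nvme_path_is_optimized(old))
		return __nvme_find_path(head, node);

	ns = __nvme_next_path(head, node, old);
	if (!ns)	/* ran out of optimized paths */
		ns = __nvme_find_path(head, node);
	return ns;
}

inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (head->subsys->iopolicy == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head, node);
	return ns;
}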


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
       [not found]       ` <8a583536-151e-6f68-f4f9-98d8c4b853dd@broadcom.com>
@ 2018-11-21 11:09         ` Hannes Reinecke
  2018-11-22 13:52         ` Hannes Reinecke
  1 sibling, 0 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-21 11:09 UTC (permalink / raw)


On 11/20/18 9:41 PM, James Smart wrote:
> 
> 
> On 11/20/2018 12:30 PM, Hannes Reinecke wrote:
>>> So you never consider non-optimized paths here?
>>>
>> Yes, correct. Two reasons:
>> - We try to optimize for performance. Selecting a non-optimized path 
>> per definition doesn't provide that.
>> - The selection algorithm becomes overly complex, and will require an 
>> even more complex mechanism to figure out at which point the use of a 
>> non-optimized path becomes feasible.
> 
> so what do you do on an out-of-optimized-paths scenario?  I would
> assume the same logic should be applicable, only looking for
> non-optimized.  And if both optimized and non-optimized are
> out-of-paths, then it should revert to the same logic, just inaccessible
> paths.  I believe the last sentence is required for current device(s)
> out there to induce storage failover.
> 
The idea is to do round-robin load balancing across optimal paths, and fall
back to a single path once we run out of optimal paths.

As already explained, one could implement a round-robin algorithm for 
non-optimal paths, too, but the question then arises how to handle
scenarios where we have both.
Clearly, in the optimal case we should be scheduling between optimal 
paths only.
But what happens when the optimal paths go down?
Shall we wait until all optimal paths are down, and only then switch 
over to the non-optimal ones?
But when doing that we might end up in a situation where we have only 
one optimal path, and several non-optimal ones.
And as the underlying reason was that several paths provide better
performance than a single one, there will be a cut-over point where
several non-optimal paths might provide better performance than a
single optimal one.
But that would be pretty much implementation defined, and hard to quantify.

Hence I settled on the rather trivial algorithm that considers optimal
paths only.

Cheers,

Hannes


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
  2018-11-21  8:28       ` Christoph Hellwig
@ 2018-11-21 11:24         ` Hannes Reinecke
  0 siblings, 0 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-21 11:24 UTC (permalink / raw)


On 11/21/18 9:28 AM, Christoph Hellwig wrote:
> On Tue, Nov 20, 2018 at 09:30:32PM +0100, Hannes Reinecke wrote:
>>> So you never consider non-optimized paths here?
>>>
>> Yes, correct. Two reasons:
>> - We try to optimize for performance. Selecting a non-optimized path per
>> definition doesn't provide that.
>> - The selection algorithm becomes overly complex, and will require an even
>> more complex mechanism to figure out at which point the use of a
>> non-optimized path becomes feasible.
> 
> We can still have a system where we don't see any optimized path at all,
> so you will need some kind of fallback.
> 
Well, the fallback is to use the original implementation, i.e. use a 
single path only.
And as already outlined in another mail, taking both optimized and 
non-optimized paths into account would increase the complexity of the 
algorithm massively.
Hence I'd prefer to have the simple implementation in for now, and 
improve upon it or add other algorithms if and when the need arises.

>>> Can you move the __nvme_find_path call for the first path into
>>> your round robing helper so that we can make this whole thing
>>> a little cleaner?  E.g.
>>>
>>> 	if (head->subsys->iopolicy == NVME_IOPOLICY_RR)
>>> 		return nvme_round_robin_path(head, node);
>>>    	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
>>> 		ns = __nvme_find_path(head, node);
>>> 	return ns;
>> Actually this order was by design.
>> By ordering it _after_ __nvme_find_path() we guarantee that the first round
>> found an optimal path, and so we can continue selecting the next optimal
>> path.
>> So this ordering would break the assumptions I made when implementing it.
>> But I'll have a look to see if that can't be lifted.
> 
> You can keep the __nvme_find_path for round robin, I just want it moved
> into the round robin helper, so that the main function stays nice and tidy.
> 
Ah, that's what you mean.
Sure, no problem at all.

Cheers,

Hannes


* [PATCH 2/2] nvme-multipath: round-robin I/O policy
       [not found]       ` <8a583536-151e-6f68-f4f9-98d8c4b853dd@broadcom.com>
  2018-11-21 11:09         ` Hannes Reinecke
@ 2018-11-22 13:52         ` Hannes Reinecke
  1 sibling, 0 replies; 15+ messages in thread
From: Hannes Reinecke @ 2018-11-22 13:52 UTC (permalink / raw)


On 11/20/18 9:41 PM, James Smart wrote:
> 
> 
> On 11/20/2018 12:30 PM, Hannes Reinecke wrote:
>>> So you never consider non-optimized paths here?
>>>
>> Yes, correct. Two reasons:
>> - We try to optimize for performance. Selecting a non-optimized path 
>> per definition doesn't provide that.
>> - The selection algorithm becomes overly complex, and will require an 
>> even more complex mechanism to figure out at which point the use of a 
>> non-optimized path becomes feasible.
> 
> so what do you do on an out-of-optimized-paths scenario?  I would
> assume the same logic should be applicable, only looking for
> non-optimized.  And if both optimized and non-optimized are
> out-of-paths, then it should revert to the same logic, just inaccessible
> paths.  I believe the last sentence is required for current device(s)
> out there to induce storage failover.
> 
Errm.

I believe you are referring to this bit of the spec:
 > If no controllers are reporting ANA Optimized state or ANA
 > Non-Optimized state, then a transition may be occurring such
 > that a controller reporting the Inaccessible state may become
 > accessible and the host should retry the command on the controller
 > reporting Inaccessible state for at least ANATT seconds

But from my interpretation this is only valid if the target is _already_
transitioning (... a transition may be occurring ...), and is not
intended to _induce_ a transition.
And if you happen to refer to the slightly 'odd' E-Series behaviour, we're
already in discussion with them to get that fixed up :-)

But in general, an explicit failover with sending I/O to the
inaccessible path is _NOT_ something we assume, nor what the spec intended.
That really brings back horrible memories from the old AVT mechanism in
RDAC, which never really worked and only caused issues during booting.
If some poor soul really had implemented it we should point him to the 
error of his ways ...

But still, yes, we could follow with a second round of trying the 
non-optimized path, but this would happen only if we ran out of 
optimized paths.

As you're not the first requesting that, I'll be including that with my 
next version.
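Roughly along these lines, maybe, as a sketch (not the posted code):
keep the loop, but remember the first live non-optimized sibling and
fall back to it only when no optimized path turns up:

static struct nvme_ns *__nvme_next_path(struct nvme_ns_head *head, int node,
					struct nvme_ns *old)
{
	struct nvme_ns *ns = old, *found = NULL, *fallback = NULL;

	do {
		ns = list_next_or_null_rcu(&head->list, &ns->siblings,
					   struct nvme_ns, siblings);
		if (!ns)
			ns = list_first_or_null_rcu(&head->list,
						    struct nvme_ns, siblings);
		if (!ns)
			break;
		if (nvme_path_is_optimized(ns)) {
			found = ns;
			break;
		}
		/* remember the first usable non-optimized path */
		if (!fallback && ns->ctrl->state == NVME_CTRL_LIVE &&
		    ns->ana_state == NVME_ANA_NONOPTIMIZED)
			fallback = ns;
	} while (ns != old);

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

Note the walk advances via &ns->siblings each iteration, so every
sibling gets visited before we give up.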

Cheers,

Hannes


* [PATCH 0/2] nvme-multipath: round-robin I/O policy
  2018-11-15 12:29 [PATCH 0/2] nvme-multipath: round-robin I/O policy Hannes Reinecke
                   ` (2 preceding siblings ...)
  2018-11-16  8:26 ` [PATCH 0/2] " Christoph Hellwig
@ 2018-12-05 20:05 ` Ewan D. Milne
  3 siblings, 0 replies; 15+ messages in thread
From: Ewan D. Milne @ 2018-12-05 20:05 UTC (permalink / raw)


On Thu, 2018-11-15 at 13:29 +0100, Hannes Reinecke wrote:
> Hi all,
> 
> after my NUMA path balancing patch hasn't met with universal approval,
> here's now my take on a 'real' round-robin I/O scheduler for NVMe multipathing.
> With this patch I'm able to boost I/O performance from 127k/127k randrw
> to 156k/156k randrw (as measured by fio).
> Testbed was two dual 32G FC HBAs connected back-to-back with nvmet
> against a 1G zram-backed namespace.
> 
> As usual, comments and reviews are welcome.
> 
> Hannes Reinecke (2):
>   nvme-multipath: add 'iopolicy' subsystem attribute
>   nvme-multipath: round-robin I/O policy
> 
>  drivers/nvme/host/core.c      |  6 +++
>  drivers/nvme/host/multipath.c | 88 ++++++++++++++++++++++++++++++++++++++++++-
>  drivers/nvme/host/nvme.h      |  8 ++++
>  3 files changed, 101 insertions(+), 1 deletion(-)
> 

Tested this with a dual-port NVMe/FC 32Gb fabric to nvmet w/ 2TB NVMe PCI storage.

It works like you'd expect.  Path balancing was exact (within 1 I/O).
My storage configuration did not show a performance improvement from
using both paths; I'll try it again later with a higher-performance
storage array.

However...

When I took both paths down, I get this, which does not happen if iopolicy
is set to "numa" rather than "round-robin".  (With "numa", I get the usual
"block nvme1n1: no path available - requeuing I/O" message.)  Restoring
both paths does not clear the lockup.  The hang looks like it is in the
nvme_find_path() or __nvme_next_path() inline.

-Ewan

 kernel:watchdog: BUG: soft lockup - CPU#7 stuck for 23s! [jbd2/nvme1n1-8:1800]
 kernel:watchdog: BUG: soft lockup - CPU#13 stuck for 23s! [fio:1866]
[  650.816208] watchdog: BUG: soft lockup - CPU#7 stuck for 23s! [jbd2/nvme1n1-8:1800]
[  650.824209] watchdog: BUG: soft lockup - CPU#13 stuck for 23s! [fio:1866]
[  650.824751] Modules linked in: ext4 mbcache jbd2 intel_rapl sb_edac x86_pkg_temp_thermal intel_powerclamd
[  650.832322] Modules linked in: ext4 mbcache jbd2 intel_rapl sb_edac x86_pkg_temp_thermal intel_powerclamd
[  650.897781] CPU: 7 PID: 1800 Comm: jbd2/nvme1n1-8 Tainted: G             L    4.20.0-rc1+ #3
[  650.963241] CPU: 13 PID: 1866 Comm: fio Tainted: G             L    4.20.0-rc1+ #3
[  650.972654] Hardware name: Dell Inc. PowerEdge R730/072T6D, BIOS 2.3.4 11/08/2016
[  650.972657] RIP: 0010:nvme_ns_head_make_request+0x1b4/0x2a0 [nvme_core]
[  650.981099] Hardware name: Dell Inc. PowerEdge R730/072T6D, BIOS 2.3.4 11/08/2016
[  650.981103] RIP: 0010:nvme_ns_head_make_request+0x1a0/0x2a0 [nvme_core]
[  650.989450] Code: 01 75 ce 48 89 0f e9 e8 fe ff ff 49 8b 16 4c 39 f2 48 89 54 24 08 0f 84 c3 fe ff ff 48f
[  650.996827] Code: 08 48 83 e9 30 74 18 48 8b 51 10 83 7a 04 01 75 d4 83 79 28 01 75 ce 48 89 0f e9 e8 fe5
[  651.005174] RSP: 0018:ffffb6a18485fbd0 EFLAGS: 00000283 ORIG_RAX: ffffffffffffff13
[  651.012552] RSP: 0018:ffffb6a18408f9d0 EFLAGS: 00000202 ORIG_RAX: ffffffffffffff13
[  651.033519] RAX: ffff95af620313f8 RBX: ffff95af736c34c0 RCX: ffff95af842b01d8
[  651.033521] RDX: ffff95af842b0208 RSI: 0000000000000000 RDI: ffff95af64a8c560
[  651.054491] RAX: ffff95af620313f8 RBX: ffff95af736c0540 RCX: ffff95af64a80000
[  651.054492] RDX: ffff95af842b0208 RSI: 0000000000000000 RDI: ffff95af64a8c560
[  651.062937] RBP: ffff95af64a80010 R08: 0000000000000000 R09: 0000000000000001
[  651.062938] R10: ffff95af9f4bd640 R11: ffff95af736c2340 R12: ffff95af81bb2f88
[  651.071383] RBP: ffff95af64a80010 R08: 0000000000000000 R09: 0000000000000001
[  651.071385] R10: ffff95af9f4bd640 R11: 0000000000000000 R12: ffff95af81bb2f88
[  651.079344] R13: ffff95af90aeefc8 R14: ffff95af64a80000 R15: 0000000000000000
[  651.079346] FS:  0000000000000000(0000) GS:ffff95af9f9c0000(0000) knlGS:0000000000000000
[  651.087303] R13: ffff95af90aeefc8 R14: ffff95af64a80000 R15: ffff95af64c98040
[  651.087306] FS:  00007f31c3686740(0000) GS:ffff95af9fb40000(0000) knlGS:0000000000000000
[  651.095265] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  651.095267] CR2: 00007f1cf6740000 CR3: 00000004d6c0a006 CR4: 00000000003606e0
[  651.103240] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  651.103242] CR2: 00007f31a4296128 CR3: 0000000826c3a002 CR4: 00000000003606e0
[  651.111201] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  651.119160] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  651.127118] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  651.127119] Call Trace:
[  651.135079] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  651.135080] Call Trace:
[  651.143040]  generic_make_request+0x197/0x3c0
[  651.152066]  generic_make_request+0x197/0x3c0
[  651.160025]  submit_bio+0x6e/0x130
[  651.169052]  submit_bio+0x6e/0x130
[  651.175462]  ? guard_bio_eod+0x32/0xb0
[  651.183423]  ? bio_add_page+0x48/0x60
[  651.189832]  submit_bh_wbc+0x157/0x190
[  651.197792]  do_blockdev_direct_IO+0x224e/0x25c0
[  651.205754]  jbd2_journal_commit_transaction+0x636/0x1aa0 [jbd2]
[  651.213714]  ? jbd2_journal_stop+0x1e3/0x3f0 [jbd2]
[  651.221672]  ? __switch_to_asm+0x40/0x70
[  651.224405]  ? ext4_dio_get_block_unwritten_async+0x90/0x90 [ext4]
[  651.232358]  ? __switch_to_asm+0x34/0x70
[  651.235087]  ext4_direct_IO+0x302/0x6c0 [ext4]
[  651.239940]  ? __switch_to+0xee/0x470
[  651.244796]  generic_file_direct_write+0xcc/0x170
[  651.248587]  ? __switch_to_asm+0x34/0x70
[  651.252379]  __generic_file_write_iter+0xb7/0x1c0
[  651.256559]  ? try_to_del_timer_sync+0x4d/0x80
[  651.260645]  ext4_file_write_iter+0xc6/0x410 [ext4]
[  651.264821]  kjournald2+0xc1/0x260 [jbd2]
[  651.269967]  __vfs_write+0x112/0x1a0
[  651.276666]  ? remove_wait_queue+0x60/0x60
[  651.282105]  vfs_write+0xad/0x1a0
[  651.286477]  kthread+0xf8/0x130
[  651.293370]  ksys_pwrite64+0x62/0x90
[  651.297744]  ? commit_timeout+0x10/0x10 [jbd2]
[  651.302696]  do_syscall_64+0x5b/0x180
[  651.306778]  ? kthread_stop+0x110/0x110
[  651.312023]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  651.316396]  ret_from_fork+0x35/0x40
[  651.321640] RIP: 0033:0x7f31c2b48d63
[  651.383240] Code: 49 89 ca b8 12 00 00 00 0f 05 48 3d 01 f0 ff ff 73 34 c3 48 83 ec 08 e8 5b f3 ff ff 481
[  651.404193] RSP: 002b:00007ffd5347e0e0 EFLAGS: 00000293 ORIG_RAX: 0000000000000012
[  651.412638] RAX: ffffffffffffffda RBX: 0000000000dc6500 RCX: 00007f31c2b48d63
[  651.420600] RDX: 0000000000000200 RSI: 00007f31c3674e00 RDI: 0000000000000003
[  651.428559] RBP: 00007f31a42916a0 R08: 0000000000000000 R09: 00007f31bee651f0
[  651.436520] R10: 000000000480fe00 R11: 0000000000000293 R12: 00007f31a42916a8
[  651.444515] R13: 0000000000000200 R14: 0000000000dc6528 R15: 0000000000dc6510

