From: Christoph Hellwig <hch@lst.de>
To: tglx@linutronix.de, axboe@fb.com
Cc: linux-block@vger.kernel.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org
Subject: [PATCH 02/13] irq: Introduce IRQD_AFFINITY_MANAGED flag
Date: Tue, 14 Jun 2016 21:58:55 +0200	[thread overview]
Message-ID: <1465934346-20648-3-git-send-email-hch@lst.de> (raw)
In-Reply-To: <1465934346-20648-1-git-send-email-hch@lst.de>

From: Thomas Gleixner <tglx@linutronix.de>

Interrupts marked with this flag are excluded from user space interrupt
affinity changes. In contrast to the IRQ_NO_BALANCING flag, the kernel
internal affinity mechanism is not blocked.

This flag will be used for multi-queue device interrupts.
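
To illustrate the intended semantics, here is a minimal, self-contained
sketch (plain C, built outside the kernel) that mirrors the state-bit
check added below. The flag value and the helper logic follow the hunk
in include/linux/irq.h; the fake_irq_data struct, the stand-in state
word and main() are purely illustrative and are not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative copy of the new flag bit from include/linux/irq.h. */
	#define IRQD_AFFINITY_MANAGED	(1 << 21)

	/* Stand-in for the per-interrupt state word normally reached
	 * through __irqd_to_state(); here it is a plain unsigned int. */
	struct fake_irq_data {
		unsigned int state;
	};

	/* Same test as the new irqd_affinity_is_managed() helper. */
	static bool affinity_is_managed(const struct fake_irq_data *d)
	{
		return d->state & IRQD_AFFINITY_MANAGED;
	}

	/* User space initiated changes are refused for managed interrupts;
	 * kernel internal changes (not modelled here) remain allowed. */
	static bool can_set_affinity_usr(const struct fake_irq_data *d)
	{
		return !affinity_is_managed(d);
	}

	int main(void)
	{
		struct fake_irq_data managed = { .state = IRQD_AFFINITY_MANAGED };
		struct fake_irq_data normal  = { .state = 0 };

		printf("managed irq, user change allowed: %d\n",
		       can_set_affinity_usr(&managed));	/* prints 0 */
		printf("normal irq, user change allowed:  %d\n",
		       can_set_affinity_usr(&normal));	/* prints 1 */
		return 0;
	}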

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/irq.h    |  7 +++++++
 kernel/irq/internals.h |  2 ++
 kernel/irq/manage.c    | 21 ++++++++++++++++++---
 kernel/irq/proc.c      |  2 +-
 4 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4d758a7..49d66d1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -197,6 +197,7 @@ struct irq_data {
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED	- Affinity is managed automatically
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -212,6 +213,7 @@ enum {
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
+	IRQD_AFFINITY_MANAGED		= (1 << 21),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 09be2c9..b15aa3b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ef0bc02..30658e9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -134,6 +134,21 @@ int irq_can_set_affinity(unsigned int irq)
 }
 
 /**
+ * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
+/**
  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
  *	@desc:		irq descriptor which has affitnity changed
  *
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4e1b947..40bdcdc 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
-- 
2.1.4
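
The user-visible effect of the kernel/irq/proc.c hunk above is that a
write to /proc/irq/<N>/smp_affinity for a managed interrupt now fails
with EIO instead of being applied. A rough user-space probe of that
behaviour might look like the sketch below; the default IRQ number is a
placeholder, and EIO is also returned when no_irq_affinity is set, so
the message is only a hint, not a definitive test.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		/* Placeholder IRQ number; pass the real one as argv[1]. */
		const char *irq = argc > 1 ? argv[1] : "42";
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/proc/irq/%s/smp_affinity", irq);

		fd = open(path, O_WRONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Try to pin the interrupt to CPU 0. For an
		 * IRQD_AFFINITY_MANAGED interrupt, write_irq_affinity()
		 * now rejects the write with -EIO. */
		if (write(fd, "1\n", 2) < 0)
			printf("%s: write failed: %s (expected EIO for managed irqs)\n",
			       path, strerror(errno));
		else
			printf("%s: affinity updated (irq is not managed)\n", path);

		close(fd);
		return 0;
	}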


