* [PATCH V2] IB/hfi1,IB/qib: Fix qp_stats sleep with rcu read lock held
@ 2016-08-12 15:17 ` ira.weiny
  0 siblings, 0 replies; 4+ messages in thread
From: ira.weiny @ 2016-08-12 15:17 UTC (permalink / raw)
  To: dledford; +Cc: linux-rdma, Mike Marciniszyn, Ira Weiny, stable

From: Mike Marciniszyn <mike.marciniszyn@intel.com>

The qp iterator init function does a kzalloc() while holding the RCU
read lock, which triggers the following warning on a debug kernel when
qp_stats is read via cat:

[  231.723948] rcu_scheduler_active = 1, debug_locks = 0
[  231.731939] 3 locks held by cat/11355:
[  231.736492]  #0:  (debugfs_srcu){......}, at: [<ffffffff813001a5>] debugfs_use_file_start+0x5/0x90
[  231.746955]  #1:  (&p->lock){+.+.+.}, at: [<ffffffff81289a6c>] seq_read+0x4c/0x3c0
[  231.755873]  #2:  (rcu_read_lock){......}, at: [<ffffffffa0a0c535>] _qp_stats_seq_start+0x5/0xd0 [hfi1]
[  231.766862]

The init functions do an implicit next, which requires the RCU read
lock to be taken before the kzalloc().

The fix for both drivers is to narrow the scope of the init function to
only the allocation and initialization of the just-allocated iter.

The implicit next is moved back into the respective start functions to
fix the issue.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
CC: <stable@vger.kernel.org> # 4.6.x-

---
Changes from V1:
	Use a do-while loop to execute the first iter call.
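
For reference, a consolidated view of the hfi1 start function as it
should read with this patch applied (the qib variant is analogous;
comments added here for illustration only).  The allocation now happens
before rcu_read_lock(), and the do-while guarantees at least one
qp_iter_next() call even when *pos is 0, replacing the implicit first
next that qp_iter_init() used to perform:

	static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
		__acquires(RCU)
	{
		struct qp_iter *iter;
		loff_t n = *pos;

		/* kzalloc() in qp_iter_init() may sleep, so allocate first */
		iter = qp_iter_init(s->private);

		/* stop calls rcu_read_unlock */
		rcu_read_lock();

		if (!iter)
			return NULL;

		/* advance to the first QP, then skip ahead to *pos */
		do {
			if (qp_iter_next(iter)) {
				kfree(iter);
				return NULL;
			}
		} while (n--);

		return iter;
	}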

 drivers/infiniband/hw/hfi1/debugfs.c    | 14 +++++++++-----
 drivers/infiniband/hw/hfi1/qp.c         |  4 ----
 drivers/infiniband/hw/qib/qib_debugfs.c | 12 +++++++++---
 drivers/infiniband/hw/qib/qib_qp.c      |  4 ----
 4 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc288..a49cc88f08a2 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
 DEBUGFS_FILE_OPS(ctx_stats);
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+	__acquires(RCU)
 {
 	struct qp_iter *iter;
 	loff_t n = *pos;
 
-	rcu_read_lock();
 	iter = qp_iter_init(s->private);
+
+	/* stop calls rcu_read_unlock */
+	rcu_read_lock();
+
 	if (!iter)
 		return NULL;
 
-	while (n--) {
+	do {
 		if (qp_iter_next(iter)) {
 			kfree(iter);
 			return NULL;
 		}
-	}
+	} while (n--);
 
 	return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 				loff_t *pos)
+	__must_hold(RCU)
 {
 	struct qp_iter *iter = iter_ptr;
 
@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+	__releases(RCU)
 {
 	rcu_read_unlock();
 }
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index a5aa3517e7d5..4e4d8317c281 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
 
 	iter->dev = dev;
 	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
-	if (qp_iter_next(iter)) {
-		kfree(iter);
-		return NULL;
-	}
 
 	return iter;
 }
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 5e75b43c596b..5bad8e3b40bb 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
 DEBUGFS_FILE(ctx_stats)
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+	__acquires(RCU)
 {
 	struct qib_qp_iter *iter;
 	loff_t n = *pos;
 
-	rcu_read_lock();
 	iter = qib_qp_iter_init(s->private);
+
+	/* stop calls rcu_read_unlock */
+	rcu_read_lock();
+
 	if (!iter)
 		return NULL;
 
-	while (n--) {
+	do {
 		if (qib_qp_iter_next(iter)) {
 			kfree(iter);
 			return NULL;
 		}
-	}
+	} while (n--);
 
 	return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 				   loff_t *pos)
+	__must_hold(RCU)
 {
 	struct qib_qp_iter *iter = iter_ptr;
 
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+	__releases(RCU)
 {
 	rcu_read_unlock();
 }
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 9cc0aae1d781..f9b8cd2354d1 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
 		return NULL;
 
 	iter->dev = dev;
-	if (qib_qp_iter_next(iter)) {
-		kfree(iter);
-		return NULL;
-	}
 
 	return iter;
 }
-- 
1.8.2

* Re: [PATCH V2] IB/hfi1,IB/qib: Fix qp_stats sleep with rcu read lock held
  2016-08-12 15:17 ` ira.weiny
@ 2016-08-14  8:55 ` Leon Romanovsky
  2016-08-22 18:32   ` Doug Ledford
  -1 siblings, 1 reply; 4+ messages in thread
From: Leon Romanovsky @ 2016-08-14  8:55 UTC (permalink / raw)
  To: ira.weiny; +Cc: dledford, linux-rdma, Mike Marciniszyn, stable

On Fri, Aug 12, 2016 at 11:17:37AM -0400, ira.weiny@intel.com wrote:
> From: Mike Marciniszyn <mike.marciniszyn@intel.com>
> 
> The qp iterator init function does a kzalloc() while holding the RCU
> read lock, which triggers the following warning on a debug kernel when
> qp_stats is read via cat:
> 
> [  231.723948] rcu_scheduler_active = 1, debug_locks = 0
> [  231.731939] 3 locks held by cat/11355:
> [  231.736492]  #0:  (debugfs_srcu){......}, at: [<ffffffff813001a5>] debugfs_use_file_start+0x5/0x90
> [  231.746955]  #1:  (&p->lock){+.+.+.}, at: [<ffffffff81289a6c>] seq_read+0x4c/0x3c0
> [  231.755873]  #2:  (rcu_read_lock){......}, at: [<ffffffffa0a0c535>] _qp_stats_seq_start+0x5/0xd0 [hfi1]
> [  231.766862]
> 
> The init functions do an implicit next, which requires the RCU read
> lock to be taken before the kzalloc().
> 
> The fix for both drivers is to narrow the scope of the init function to
> only the allocation and initialization of the just-allocated iter.
> 
> The implicit next is moved back into the respective start functions to
> fix the issue.
> 
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
> CC: <stable@vger.kernel.org> # 4.6.x-
> 
> ---
> Changes from V1:
> 	Use a do-while loop to execute the first iter call.

Thanks,
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>

* Re: [PATCH V2] IB/hfi1,IB/qib: Fix qp_stats sleep with rcu read lock held
  2016-08-14  8:55 ` Leon Romanovsky
@ 2016-08-22 18:32   ` Doug Ledford
  0 siblings, 0 replies; 4+ messages in thread
From: Doug Ledford @ 2016-08-22 18:32 UTC (permalink / raw)
  To: Leon Romanovsky, ira.weiny; +Cc: linux-rdma, Mike Marciniszyn, stable


On 8/14/2016 4:55 AM, Leon Romanovsky wrote:
> On Fri, Aug 12, 2016 at 11:17:37AM -0400, ira.weiny@intel.com wrote:
>> From: Mike Marciniszyn <mike.marciniszyn@intel.com>
>>
>> The qp iterator init function does a kzalloc() while holding the RCU
>> read lock, which triggers the following warning on a debug kernel when
>> qp_stats is read via cat:
>>
>> [  231.723948] rcu_scheduler_active = 1, debug_locks = 0
>> [  231.731939] 3 locks held by cat/11355:
>> [  231.736492]  #0:  (debugfs_srcu){......}, at: [<ffffffff813001a5>] debugfs_use_file_start+0x5/0x90
>> [  231.746955]  #1:  (&p->lock){+.+.+.}, at: [<ffffffff81289a6c>] seq_read+0x4c/0x3c0
>> [  231.755873]  #2:  (rcu_read_lock){......}, at: [<ffffffffa0a0c535>] _qp_stats_seq_start+0x5/0xd0 [hfi1]
>> [  231.766862]
>>
>> The init functions do an implicit next, which requires the RCU read
>> lock to be taken before the kzalloc().
>>
>> The fix for both drivers is to narrow the scope of the init function to
>> only the allocation and initialization of the just-allocated iter.
>>
>> The implicit next is moved back into the respective start functions to
>> fix the issue.
>>
>> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
>> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
>> CC: <stable@vger.kernel.org> # 4.6.x-
>>
>> ---
>> Changes from V1:
>> 	Use a do-while loop to execute the first iter call.
> 
> Thanks,
> Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
> 

Sorry, I responded to the v1 email, but I applied the v2 patch.

-- 
Doug Ledford <dledford@redhat.com>
    GPG Key ID: 0E572FDD


end of thread, other threads:[~2016-08-22 18:32 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-12 15:17 [PATCH V2] IB/hfi1,IB/qib: Fix qp_stats sleep with rcu read lock held ira.weiny
2016-08-12 15:17 ` ira.weiny
2016-08-14  8:55 ` Leon Romanovsky
2016-08-22 18:32   ` Doug Ledford
