All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/2] blk-mq: convert atomic_t to refcount_t
@ 2019-03-22 14:48 Yufen Yu
  2019-03-22 14:48 ` [PATCH 1/2] blk-mq: convert hctx.nr_active " Yufen Yu
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Yufen Yu @ 2019-03-22 14:48 UTC (permalink / raw)
  To: axboe, dwindsor, peterz; +Cc: linux-block

Hi,

This patchset converts atomic variables (hctx.nr_active and blk_mq_tag.active_queues)
to the newly provided refcount_t type and API, which can prevent accidental counter
overflows and underflows.


Yufen Yu (2):
  blk-mq: convert hctx.nr_active to refcount_t
  blk-mq: covert blk_mq_tag.active_queues to refcount_t

 block/blk-mq-debugfs.c | 4 ++--
 block/blk-mq-tag.c     | 8 ++++----
 block/blk-mq-tag.h     | 2 +-
 block/blk-mq.c         | 8 ++++----
 block/blk-mq.h         | 2 +-
 include/linux/blk-mq.h | 2 +-
 6 files changed, 13 insertions(+), 13 deletions(-)

-- 
2.16.2.dirty


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 1/2] blk-mq: convert hctx.nr_active to refcount_t
  2019-03-22 14:48 [PATCH 0/2] blk-mq: convert atomic_t to refcount_t Yufen Yu
@ 2019-03-22 14:48 ` Yufen Yu
  2019-03-22 15:01   ` Peter Zijlstra
  2019-03-22 14:48 ` [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues " Yufen Yu
  2019-03-22 15:23 ` [PATCH 0/2] blk-mq: convert atomic_t " Jens Axboe
  2 siblings, 1 reply; 9+ messages in thread
From: Yufen Yu @ 2019-03-22 14:48 UTC (permalink / raw)
  To: axboe, dwindsor, peterz; +Cc: linux-block

We convert 'nr_active' from atomic_t to the newly provided
refcount_t type and API, which can prevent accidental counter
overflows and underflows.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
---
 block/blk-mq-debugfs.c | 2 +-
 block/blk-mq-tag.c     | 2 +-
 block/blk-mq.c         | 8 ++++----
 block/blk-mq.h         | 2 +-
 include/linux/blk-mq.h | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ec1d18cb643c..81536b7201be 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -637,7 +637,7 @@ static int hctx_active_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
 
-	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
+	seq_printf(m, "%d\n", refcount_read(&hctx->nr_active));
 	return 0;
 }
 
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a4931fc7be8a..3fcb15fa6398 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -90,7 +90,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	 * Allow at least some tags
 	 */
 	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
+	return refcount_read(&hctx->nr_active) < depth;
 }
 
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ea01c23b58a3..004773378209 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -300,7 +300,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	} else {
 		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
 			rq_flags = RQF_MQ_INFLIGHT;
-			atomic_inc(&data->hctx->nr_active);
+			refcount_inc(&data->hctx->nr_active);
 		}
 		rq->tag = tag;
 		rq->internal_tag = -1;
@@ -514,7 +514,7 @@ void blk_mq_free_request(struct request *rq)
 
 	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
-		atomic_dec(&hctx->nr_active);
+		refcount_dec(&hctx->nr_active);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->backing_dev_info);
@@ -1055,7 +1055,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 	if (rq->tag >= 0) {
 		if (shared) {
 			rq->rq_flags |= RQF_MQ_INFLIGHT;
-			atomic_inc(&data.hctx->nr_active);
+			refcount_inc(&data.hctx->nr_active);
 		}
 		data.hctx->tags->rqs[rq->tag] = rq;
 	}
@@ -2710,7 +2710,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		return NULL;
 	}
 
-	atomic_set(&hctx->nr_active, 0);
+	refcount_set(&hctx->nr_active, 0);
 	hctx->numa_node = node;
 	hctx->queue_num = hctx_idx;
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 60698b4c25a2..26089d7679a2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -218,7 +218,7 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-		atomic_dec(&hctx->nr_active);
+		refcount_dec(&hctx->nr_active);
 	}
 }
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b0c814bcc7e3..8868e56d7532 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -56,7 +56,7 @@ struct blk_mq_hw_ctx {
 	unsigned int		numa_node;
 	unsigned int		queue_num;
 
-	atomic_t		nr_active;
+	refcount_t		nr_active;
 	unsigned int		nr_expired;
 
 	struct hlist_node	cpuhp_dead;
-- 
2.16.2.dirty


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues to refcount_t
  2019-03-22 14:48 [PATCH 0/2] blk-mq: convert atomic_t to refcount_t Yufen Yu
  2019-03-22 14:48 ` [PATCH 1/2] blk-mq: convert hctx.nr_active " Yufen Yu
@ 2019-03-22 14:48 ` Yufen Yu
  2019-03-22 15:02   ` Peter Zijlstra
  2019-03-22 15:23 ` [PATCH 0/2] blk-mq: convert atomic_t " Jens Axboe
  2 siblings, 1 reply; 9+ messages in thread
From: Yufen Yu @ 2019-03-22 14:48 UTC (permalink / raw)
  To: axboe, dwindsor, peterz; +Cc: linux-block

We convert 'active_queues' from atomic_t to the newly provided
refcount_t type and API, which can prevent accidental counter
overflows and underflows.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
---
 block/blk-mq-debugfs.c | 2 +-
 block/blk-mq-tag.c     | 6 +++---
 block/blk-mq-tag.h     | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 81536b7201be..48f0cc2c90ba 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -473,7 +473,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
 	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
 	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
 	seq_printf(m, "active_queues=%d\n",
-		   atomic_read(&tags->active_queues));
+		   refcount_read(&tags->active_queues));
 
 	seq_puts(m, "\nbitmap_tags:\n");
 	sbitmap_queue_show(&tags->bitmap_tags, m);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 3fcb15fa6398..1d713f221bf7 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -31,7 +31,7 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-		atomic_inc(&hctx->tags->active_queues);
+		refcount_inc(&hctx->tags->active_queues);
 
 	return true;
 }
@@ -57,7 +57,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 		return;
 
-	atomic_dec(&tags->active_queues);
+	refcount_dec(&tags->active_queues);
 
 	blk_mq_tag_wakeup_all(tags, false);
 }
@@ -82,7 +82,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	if (bt->sb.depth == 1)
 		return true;
 
-	users = atomic_read(&hctx->tags->active_queues);
+	users = refcount_read(&hctx->tags->active_queues);
 	if (!users)
 		return true;
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..e948b4833a2a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -11,7 +11,7 @@ struct blk_mq_tags {
 	unsigned int nr_tags;
 	unsigned int nr_reserved_tags;
 
-	atomic_t active_queues;
+	refcount_t active_queues;
 
 	struct sbitmap_queue bitmap_tags;
 	struct sbitmap_queue breserved_tags;
-- 
2.16.2.dirty


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] blk-mq: convert hctx.nr_active to refcount_t
  2019-03-22 14:48 ` [PATCH 1/2] blk-mq: convert hctx.nr_active " Yufen Yu
@ 2019-03-22 15:01   ` Peter Zijlstra
  2019-03-22 15:04     ` Peter Zijlstra
  0 siblings, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2019-03-22 15:01 UTC (permalink / raw)
  To: Yufen Yu; +Cc: axboe, dwindsor, linux-block

On Fri, Mar 22, 2019 at 10:48:17PM +0800, Yufen Yu wrote:
> @@ -2710,7 +2710,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
>  		return NULL;
>  	}
>  
> -	atomic_set(&hctx->nr_active, 0);
> +	refcount_set(&hctx->nr_active, 0);
>  	hctx->numa_node = node;
>  	hctx->queue_num = hctx_idx;
>  

That looks bogus, refcount_t cannot inc-from-zero.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues to refcount_t
  2019-03-22 14:48 ` [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues " Yufen Yu
@ 2019-03-22 15:02   ` Peter Zijlstra
  0 siblings, 0 replies; 9+ messages in thread
From: Peter Zijlstra @ 2019-03-22 15:02 UTC (permalink / raw)
  To: Yufen Yu; +Cc: axboe, dwindsor, linux-block

On Fri, Mar 22, 2019 at 10:48:18PM +0800, Yufen Yu wrote:
> We convert 'active_queues' from atomic_t to the newly provided
> refcount_t type and API, which can prevent accidental counter
> overflows and underflows.

There is no initialization using refcount_set(), which leads me to
believe we're 0 initialized and then it's broken again.

> Signed-off-by: Yufen Yu <yuyufen@huawei.com>
> ---
>  block/blk-mq-debugfs.c | 2 +-
>  block/blk-mq-tag.c     | 6 +++---
>  block/blk-mq-tag.h     | 2 +-
>  3 files changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
> index 81536b7201be..48f0cc2c90ba 100644
> --- a/block/blk-mq-debugfs.c
> +++ b/block/blk-mq-debugfs.c
> @@ -473,7 +473,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
>  	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
>  	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
>  	seq_printf(m, "active_queues=%d\n",
> -		   atomic_read(&tags->active_queues));
> +		   refcount_read(&tags->active_queues));
>  
>  	seq_puts(m, "\nbitmap_tags:\n");
>  	sbitmap_queue_show(&tags->bitmap_tags, m);
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 3fcb15fa6398..1d713f221bf7 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -31,7 +31,7 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>  {
>  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
>  	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> -		atomic_inc(&hctx->tags->active_queues);
> +		refcount_inc(&hctx->tags->active_queues);
>  
>  	return true;
>  }
> @@ -57,7 +57,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
>  	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
>  		return;
>  
> -	atomic_dec(&tags->active_queues);
> +	refcount_dec(&tags->active_queues);
>  
>  	blk_mq_tag_wakeup_all(tags, false);
>  }
> @@ -82,7 +82,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
>  	if (bt->sb.depth == 1)
>  		return true;
>  
> -	users = atomic_read(&hctx->tags->active_queues);
> +	users = refcount_read(&hctx->tags->active_queues);
>  	if (!users)
>  		return true;
>  
> diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
> index 61deab0b5a5a..e948b4833a2a 100644
> --- a/block/blk-mq-tag.h
> +++ b/block/blk-mq-tag.h
> @@ -11,7 +11,7 @@ struct blk_mq_tags {
>  	unsigned int nr_tags;
>  	unsigned int nr_reserved_tags;
>  
> -	atomic_t active_queues;
> +	refcount_t active_queues;
>  
>  	struct sbitmap_queue bitmap_tags;
>  	struct sbitmap_queue breserved_tags;
> -- 
> 2.16.2.dirty
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] blk-mq: convert hctx.nr_active to refcount_t
  2019-03-22 15:01   ` Peter Zijlstra
@ 2019-03-22 15:04     ` Peter Zijlstra
  2019-03-22 15:21       ` Jens Axboe
  0 siblings, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2019-03-22 15:04 UTC (permalink / raw)
  To: Yufen Yu; +Cc: axboe, dwindsor, linux-block

On Fri, Mar 22, 2019 at 04:01:16PM +0100, Peter Zijlstra wrote:
> On Fri, Mar 22, 2019 at 10:48:17PM +0800, Yufen Yu wrote:
> > @@ -2710,7 +2710,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
> >  		return NULL;
> >  	}
> >  
> > -	atomic_set(&hctx->nr_active, 0);
> > +	refcount_set(&hctx->nr_active, 0);
> >  	hctx->numa_node = node;
> >  	hctx->queue_num = hctx_idx;
> >  
> 
> That looks bogus, refcount_t cannot inc-from-zero.

I also don't see a single dec_and_test in that patch, which leads me to
believe nr_active is not in fact a refcount.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] blk-mq: convert hctx.nr_active to refcount_t
  2019-03-22 15:04     ` Peter Zijlstra
@ 2019-03-22 15:21       ` Jens Axboe
  2019-03-23 12:30         ` yuyufen
  0 siblings, 1 reply; 9+ messages in thread
From: Jens Axboe @ 2019-03-22 15:21 UTC (permalink / raw)
  To: Peter Zijlstra, Yufen Yu; +Cc: dwindsor, linux-block

On 3/22/19 9:04 AM, Peter Zijlstra wrote:
> On Fri, Mar 22, 2019 at 04:01:16PM +0100, Peter Zijlstra wrote:
>> On Fri, Mar 22, 2019 at 10:48:17PM +0800, Yufen Yu wrote:
>>> @@ -2710,7 +2710,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
>>>  		return NULL;
>>>  	}
>>>  
>>> -	atomic_set(&hctx->nr_active, 0);
>>> +	refcount_set(&hctx->nr_active, 0);
>>>  	hctx->numa_node = node;
>>>  	hctx->queue_num = hctx_idx;
>>>  
>>
>> That looks bogus, refcount_t cannot inc-from-zero.
> 
> I also don't see a single dec_and_test in that patch, which leads me to
> believe nr_active is not in fact a refcount.

It isn't a refcount at all, it's just a count of how many queues are
active in a shared tag map scenario.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 0/2] blk-mq: convert atomic_t to refcount_t
  2019-03-22 14:48 [PATCH 0/2] blk-mq: convert atomic_t to refcount_t Yufen Yu
  2019-03-22 14:48 ` [PATCH 1/2] blk-mq: convert hctx.nr_active " Yufen Yu
  2019-03-22 14:48 ` [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues " Yufen Yu
@ 2019-03-22 15:23 ` Jens Axboe
  2 siblings, 0 replies; 9+ messages in thread
From: Jens Axboe @ 2019-03-22 15:23 UTC (permalink / raw)
  To: Yufen Yu, dwindsor, peterz; +Cc: linux-block

On 3/22/19 8:48 AM, Yufen Yu wrote:
> Hi,
> 
> This patchset converts atomic variables (hctx.nr_active and blk_mq_tag.active_queues)
> to the newly provided refcount_t type and API, which can prevent accidental counter overflows
> and underflows.

Neither of these are reference counts in the sense that things go away when they
hit zero.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/2] blk-mq: convert hctx.nr_active to refcount_t
  2019-03-22 15:21       ` Jens Axboe
@ 2019-03-23 12:30         ` yuyufen
  0 siblings, 0 replies; 9+ messages in thread
From: yuyufen @ 2019-03-23 12:30 UTC (permalink / raw)
  To: Jens Axboe, Peter Zijlstra; +Cc: dwindsor, linux-block

Hi


On 2019/3/22 23:21, Jens Axboe wrote:
> On 3/22/19 9:04 AM, Peter Zijlstra wrote:
>> On Fri, Mar 22, 2019 at 04:01:16PM +0100, Peter Zijlstra wrote:
>>> On Fri, Mar 22, 2019 at 10:48:17PM +0800, Yufen Yu wrote:
>>>> @@ -2710,7 +2710,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
>>>>   		return NULL;
>>>>   	}
>>>>   
>>>> -	atomic_set(&hctx->nr_active, 0);
>>>> +	refcount_set(&hctx->nr_active, 0);
>>>>   	hctx->numa_node = node;
>>>>   	hctx->queue_num = hctx_idx;
>>>>   
>>> That looks bogus, refcount_t cannot inc-from-zero.
>> I also don't see a single dec_and_test in that patch, which leads me to
>> believe nr_active is not in fact a refcount.
> It isn't a refcount at all, it's just a count of how many queues are
> active in a shared tag map scenario.
>

Sorry for the noise, and thanks a lot for the review. I did not
properly understand the difference between a plain count and a
refcount, so I assumed 'atomic' could be converted to 'refcount'
anywhere, whether or not it was really needed — after all, 'refcount'
has the advantage of checking for overflow and underflow.

Elena Reshetova has clearly summarized which atomic_t variables should be
converted to refcount_t [1], which was worth learning:
" atomic_t variables are currently used to implement reference
counters with the following properties:
  - counter is initialized to 1 using atomic_set()
  - a resource is freed upon counter reaching zero
  - once counter reaches zero, its further
    increments aren't allowed
  - counter schema uses basic atomic operations
    (set, inc, inc_not_zero, dec_and_test, etc.)
Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. "

[1] https://lore.kernel.org/patchwork/patch/826782/

Yufen
Thanks


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2019-03-23 12:30 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-03-22 14:48 [PATCH 0/2] blk-mq: convert atomic_t to refcount_t Yufen Yu
2019-03-22 14:48 ` [PATCH 1/2] blk-mq: convert hctx.nr_active " Yufen Yu
2019-03-22 15:01   ` Peter Zijlstra
2019-03-22 15:04     ` Peter Zijlstra
2019-03-22 15:21       ` Jens Axboe
2019-03-23 12:30         ` yuyufen
2019-03-22 14:48 ` [PATCH 2/2] blk-mq: covert blk_mq_tag.active_queues " Yufen Yu
2019-03-22 15:02   ` Peter Zijlstra
2019-03-22 15:23 ` [PATCH 0/2] blk-mq: convert atomic_t " Jens Axboe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.