linux-kernel.vger.kernel.org archive mirror
* [PATCH v4 0/2] optimize the bfq queue idle judgment
@ 2021-10-14  1:45 Yu Kuai
  2021-10-14  1:45 ` [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs' Yu Kuai
  2021-10-14  1:45 ` [PATCH v4 2/2] block, bfq: do not idle if only one cgroup is activated Yu Kuai
  0 siblings, 2 replies; 9+ messages in thread
From: Yu Kuai @ 2021-10-14  1:45 UTC (permalink / raw)
  To: paolo.valente, axboe; +Cc: linux-block, linux-kernel, yukuai3, yi.zhang

Changes in V4:
 - fix a compile warning when CONFIG_BLK_CGROUP is not enabled.

Changes in V3:
 - Instead of tracking each queue in the root group, track the root
 group directly, just like non-root groups do.
 - Remove patches 3 and 4 from the series.

Changes in V2:
 - As suggested by Paolo, add support to track whether root_group has
 any pending requests, and use that to handle the situation where only
 one group is activated while the root group has no pending requests.
 - Modify the commit message in patch 2.

Yu Kuai (2):
  block, bfq: counted root group into 'num_groups_with_pending_reqs'
  block, bfq: do not idle if only one cgroup is activated

 block/bfq-iosched.c | 40 ++++++++++++++++++++++++++++--------
 block/bfq-wf2q.c    | 50 +++++++++++++++++++++++++++++++++------------
 2 files changed, 69 insertions(+), 21 deletions(-)

-- 
2.31.1



* [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-14  1:45 [PATCH v4 0/2] optimize the bfq queue idle judgment Yu Kuai
@ 2021-10-14  1:45 ` Yu Kuai
  2021-10-20  8:51   ` Paolo Valente
  2021-10-14  1:45 ` [PATCH v4 2/2] block, bfq: do not idle if only one cgroup is activated Yu Kuai
  1 sibling, 1 reply; 9+ messages in thread
From: Yu Kuai @ 2021-10-14  1:45 UTC (permalink / raw)
  To: paolo.valente, axboe; +Cc: linux-block, linux-kernel, yukuai3, yi.zhang

'num_groups_with_pending_reqs' represents how many non-root groups
have pending requests. With this patch, the root group is also counted
in 'num_groups_with_pending_reqs'.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/bfq-iosched.c | 36 ++++++++++++++++++++++++++------
 block/bfq-wf2q.c    | 50 +++++++++++++++++++++++++++++++++------------
 2 files changed, 67 insertions(+), 19 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fec18118dc30..d251735383f7 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -852,6 +852,16 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
 	bfq_put_queue(bfqq);
 }
 
+static inline void
+bfq_clear_group_with_pending_reqs(struct bfq_data *bfqd,
+				  struct bfq_entity *entity)
+{
+	if (entity->in_groups_with_pending_reqs) {
+		entity->in_groups_with_pending_reqs = false;
+		bfqd->num_groups_with_pending_reqs--;
+	}
+}
+
 /*
  * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
  * of active groups for each queue's inactive parent entity.
@@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
 			     struct bfq_queue *bfqq)
 {
 	struct bfq_entity *entity = bfqq->entity.parent;
+	struct bfq_sched_data *sd;
+
+	/*
+	 * If the bfq queue is in root group, the decrement of
+	 * num_groups_with_pending_reqs is performed immediately upon the
+	 * deactivation of entity.
+	 */
+	if (!entity) {
+		entity = &bfqd->root_group->entity;
+		sd = entity->my_sched_data;
+
+		if (!sd->in_service_entity)
+			bfq_clear_group_with_pending_reqs(bfqd, entity);
+
+		return;
+	}
 
 	for_each_entity(entity) {
-		struct bfq_sched_data *sd = entity->my_sched_data;
+		sd = entity->my_sched_data;
 
 		if (sd->next_in_service || sd->in_service_entity) {
 			/*
@@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
 		}
 
 		/*
-		 * The decrement of num_groups_with_pending_reqs is
+		 * If the bfq queue is not in root group,
+		 * the decrement of num_groups_with_pending_reqs is
 		 * not performed immediately upon the deactivation of
 		 * entity, but it is delayed to when it also happens
 		 * that the first leaf descendant bfqq of entity gets
@@ -889,10 +916,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
 		 * needed. See the comments on
 		 * num_groups_with_pending_reqs for details.
 		 */
-		if (entity->in_groups_with_pending_reqs) {
-			entity->in_groups_with_pending_reqs = false;
-			bfqd->num_groups_with_pending_reqs--;
-		}
+		bfq_clear_group_with_pending_reqs(bfqd, entity);
 	}
 
 	/*
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index b74cc0da118e..3e9e672aa302 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -945,6 +945,42 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
 
 	bfq_active_insert(st, entity);
 }
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+static inline void
+bfq_set_group_with_pending_reqs(struct bfq_data *bfqd,
+				struct bfq_entity *entity)
+{
+	if (!entity->in_groups_with_pending_reqs) {
+		entity->in_groups_with_pending_reqs = true;
+		bfqd->num_groups_with_pending_reqs++;
+	}
+}
+
+static void bfq_update_groups_with_pending_reqs(struct bfq_entity *entity)
+{
+	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+
+	if (bfqq) {
+		/*
+		 * If the entity represents bfq_queue, and the queue belongs to
+		 * root cgroup.
+		 */
+		if (!entity->parent)
+			bfq_set_group_with_pending_reqs(bfqq->bfqd,
+				&bfqq->bfqd->root_group->entity);
+	} else {
+		/* If the entity represents bfq_group. */
+		struct bfq_group *bfqg =
+			container_of(entity, struct bfq_group, entity);
+		struct bfq_data *bfqd = bfqg->bfqd;
+
+		bfq_set_group_with_pending_reqs(bfqd, entity);
+	}
+}
+#else
+#define bfq_update_groups_with_pending_reqs(entity) \
+	do {} while (0)
+#endif
 
 /**
  * __bfq_activate_entity - handle activation of entity.
@@ -999,19 +1035,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
 		entity->on_st_or_in_serv = true;
 	}
 
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
-		struct bfq_group *bfqg =
-			container_of(entity, struct bfq_group, entity);
-		struct bfq_data *bfqd = bfqg->bfqd;
-
-		if (!entity->in_groups_with_pending_reqs) {
-			entity->in_groups_with_pending_reqs = true;
-			bfqd->num_groups_with_pending_reqs++;
-		}
-	}
-#endif
-
+	bfq_update_groups_with_pending_reqs(entity);
 	bfq_update_fin_time_enqueue(entity, st, backshifted);
 }
 
-- 
2.31.1



* [PATCH v4 2/2] block, bfq: do not idle if only one cgroup is activated
  2021-10-14  1:45 [PATCH v4 0/2] optimize the bfq queue idle judgment Yu Kuai
  2021-10-14  1:45 ` [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs' Yu Kuai
@ 2021-10-14  1:45 ` Yu Kuai
  1 sibling, 0 replies; 9+ messages in thread
From: Yu Kuai @ 2021-10-14  1:45 UTC (permalink / raw)
  To: paolo.valente, axboe; +Cc: linux-block, linux-kernel, yukuai3, yi.zhang

If only one group is activated, there is no need to guarantee the
same share of the throughput of queues in the same group.

Test procedure:
run "fio -numjobs=1 -ioengine=psync -bs=4k -direct=1 -rw=randread..."
multiple times in the same cgroup.

Test result: total bandwidth (MiB/s)
| total jobs | before this patch | after this patch      |
| ---------- | ----------------- | --------------------- |
| 1          | 33.8              | 33.8                  |
| 2          | 33.8              | 65.4 (32.7 each job)  |
| 4          | 33.8              | 106.8 (26.7 each job) |
| 8          | 33.8              | 126.4 (15.8 each job) |

By the way, if I test with "fio -numjobs=1/2/4/8 ...", the result is
the same with or without this patch, because bfq_queues can be merged
in this situation.
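
For intuition, the group-related part of the idling decision after both
patches can be reduced to the stand-alone model below. This is only an
illustration, not the upstream bfq_asymmetric_scenario() path; the toy_*
names are made up, and the point is just how counting the root group
(patch 1) combines with the "> 1" check (patch 2):

#include <assert.h>
#include <stdbool.h>

struct toy_bfqd {
        /* After patch 1 this counter includes the root group. */
        unsigned int num_groups_with_pending_reqs;
};

/* Patch 2: idle for service guarantees only if at least two groups compete
 * (before the series, any non-root group with pending requests, i.e. a
 * counter value > 0, was enough to force idling). */
static bool toy_groups_need_idling(const struct toy_bfqd *bfqd)
{
        return bfqd->num_groups_with_pending_reqs > 1;
}

int main(void)
{
        struct toy_bfqd bfqd = { .num_groups_with_pending_reqs = 1 };

        /* Only the root group has pending I/O: no idling, full throughput. */
        assert(!toy_groups_need_idling(&bfqd));

        /* Root group plus one child cgroup: idling is kept for fairness. */
        bfqd.num_groups_with_pending_reqs = 2;
        assert(toy_groups_need_idling(&bfqd));

        return 0;
}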

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/bfq-iosched.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index d251735383f7..8d94f511bee8 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -709,7 +709,7 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  * much easier to maintain the needed state:
  * 1) all active queues have the same weight,
  * 2) all active queues belong to the same I/O-priority class,
- * 3) there are no active groups.
+ * 3) there is at most one active group.
  * In particular, the last condition is always true if hierarchical
  * support or the cgroups interface are not enabled, thus no state
  * needs to be maintained in this case.
@@ -741,7 +741,7 @@ static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
 
 	return varied_queue_weights || multiple_classes_busy
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-	       || bfqd->num_groups_with_pending_reqs > 0
+	       || bfqd->num_groups_with_pending_reqs > 1
 #endif
 		;
 }
-- 
2.31.1



* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-14  1:45 ` [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs' Yu Kuai
@ 2021-10-20  8:51   ` Paolo Valente
  2021-10-20  9:20     ` yukuai (C)
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Valente @ 2021-10-20  8:51 UTC (permalink / raw)
  To: Yu Kuai; +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang



> On 14 Oct 2021, at 03:45, Yu Kuai <yukuai3@huawei.com> wrote:
> 
> 'num_groups_with_pending_reqs' represents how many groups that are
> not root group and have pending requests. This patch also counted
> root group into 'num_groups_with_pending_reqs'.
> 
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
> block/bfq-iosched.c | 36 ++++++++++++++++++++++++++------
> block/bfq-wf2q.c    | 50 +++++++++++++++++++++++++++++++++------------
> 2 files changed, 67 insertions(+), 19 deletions(-)
> 
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index fec18118dc30..d251735383f7 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -852,6 +852,16 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
> 	bfq_put_queue(bfqq);
> }
> 
> +static inline void
> +bfq_clear_group_with_pending_reqs(struct bfq_data *bfqd,
> +				  struct bfq_entity *entity)
> +{
> +	if (entity->in_groups_with_pending_reqs) {
> +		entity->in_groups_with_pending_reqs = false;
> +		bfqd->num_groups_with_pending_reqs--;
> +	}
> +}
> +
> /*
>  * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
>  * of active groups for each queue's inactive parent entity.
> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
> 			     struct bfq_queue *bfqq)
> {
> 	struct bfq_entity *entity = bfqq->entity.parent;
> +	struct bfq_sched_data *sd;
> +
> +	/*
> +	 * If the bfq queue is in root group, the decrement of
> +	 * num_groups_with_pending_reqs is performed immediately upon the
> +	 * deactivation of entity.
> +	 */
> +	if (!entity) {
> +		entity = &bfqd->root_group->entity;
> +		sd = entity->my_sched_data;
> +
> +		if (!sd->in_service_entity)
> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
> +
> +		return;
> +	}
> 
> 	for_each_entity(entity) {
> -		struct bfq_sched_data *sd = entity->my_sched_data;
> +		sd = entity->my_sched_data;
> 
> 		if (sd->next_in_service || sd->in_service_entity) {
> 			/*
> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
> 		}
> 
> 		/*
> -		 * The decrement of num_groups_with_pending_reqs is
> +		 * If the bfq queue is not in root group,
> +		 * the decrement of num_groups_with_pending_reqs is


I'm sorry if I didn't notice this before, but why do you postpone the
decrement only for queues not in the root group?  If I'm not missing
anything, the active (i.e., with pending reqs) state of the root group
is to be computed in the same way as that of any other group.

Thanks,
Paolo

> 		 * not performed immediately upon the deactivation of
> 		 * entity, but it is delayed to when it also happens
> 		 * that the first leaf descendant bfqq of entity gets
> @@ -889,10 +916,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
> 		 * needed. See the comments on
> 		 * num_groups_with_pending_reqs for details.
> 		 */
> -		if (entity->in_groups_with_pending_reqs) {
> -			entity->in_groups_with_pending_reqs = false;
> -			bfqd->num_groups_with_pending_reqs--;
> -		}
> +		bfq_clear_group_with_pending_reqs(bfqd, entity);
> 	}
> 
> 	/*
> diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
> index b74cc0da118e..3e9e672aa302 100644
> --- a/block/bfq-wf2q.c
> +++ b/block/bfq-wf2q.c
> @@ -945,6 +945,42 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
> 
> 	bfq_active_insert(st, entity);
> }
> +#ifdef CONFIG_BFQ_GROUP_IOSCHED
> +static inline void
> +bfq_set_group_with_pending_reqs(struct bfq_data *bfqd,
> +				struct bfq_entity *entity)
> +{
> +	if (!entity->in_groups_with_pending_reqs) {
> +		entity->in_groups_with_pending_reqs = true;
> +		bfqd->num_groups_with_pending_reqs++;
> +	}
> +}
> +
> +static void bfq_update_groups_with_pending_reqs(struct bfq_entity *entity)
> +{
> +	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
> +
> +	if (bfqq) {
> +		/*
> +		 * If the entity represents bfq_queue, and the queue belongs to
> +		 * root cgroup.
> +		 */
> +		if (!entity->parent)
> +			bfq_set_group_with_pending_reqs(bfqq->bfqd,
> +				&bfqq->bfqd->root_group->entity);
> +	} else {
> +		/* If the entity represents bfq_group. */
> +		struct bfq_group *bfqg =
> +			container_of(entity, struct bfq_group, entity);
> +		struct bfq_data *bfqd = bfqg->bfqd;
> +
> +		bfq_set_group_with_pending_reqs(bfqd, entity);
> +	}
> +}
> +#else
> +#define bfq_update_groups_with_pending_reqs(entity) \
> +	do {} while (0)
> +#endif
> 
> /**
>  * __bfq_activate_entity - handle activation of entity.
> @@ -999,19 +1035,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
> 		entity->on_st_or_in_serv = true;
> 	}
> 
> -#ifdef CONFIG_BFQ_GROUP_IOSCHED
> -	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
> -		struct bfq_group *bfqg =
> -			container_of(entity, struct bfq_group, entity);
> -		struct bfq_data *bfqd = bfqg->bfqd;
> -
> -		if (!entity->in_groups_with_pending_reqs) {
> -			entity->in_groups_with_pending_reqs = true;
> -			bfqd->num_groups_with_pending_reqs++;
> -		}
> -	}
> -#endif
> -
> +	bfq_update_groups_with_pending_reqs(entity);
> 	bfq_update_fin_time_enqueue(entity, st, backshifted);
> }
> 
> -- 
> 2.31.1
> 



* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-20  8:51   ` Paolo Valente
@ 2021-10-20  9:20     ` yukuai (C)
  2021-10-20  9:29       ` Paolo Valente
  0 siblings, 1 reply; 9+ messages in thread
From: yukuai (C) @ 2021-10-20  9:20 UTC (permalink / raw)
  To: Paolo Valente; +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang

On 2021/10/20 16:51, Paolo Valente wrote:

>> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>> 			     struct bfq_queue *bfqq)
>> {
>> 	struct bfq_entity *entity = bfqq->entity.parent;
>> +	struct bfq_sched_data *sd;
>> +
>> +	/*
>> +	 * If the bfq queue is in root group, the decrement of
>> +	 * num_groups_with_pending_reqs is performed immediately upon the
>> +	 * deactivation of entity.
>> +	 */
>> +	if (!entity) {
>> +		entity = &bfqd->root_group->entity;
>> +		sd = entity->my_sched_data;
>> +
>> +		if (!sd->in_service_entity)
>> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
>> +
>> +		return;
>> +	}
>>
>> 	for_each_entity(entity) {
>> -		struct bfq_sched_data *sd = entity->my_sched_data;
>> +		sd = entity->my_sched_data;
>>
>> 		if (sd->next_in_service || sd->in_service_entity) {
>> 			/*
>> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>> 		}
>>
>> 		/*
>> -		 * The decrement of num_groups_with_pending_reqs is
>> +		 * If the bfq queue is not in root group,
>> +		 * the decrement of num_groups_with_pending_reqs is
> 
> 
> I'm sorry if I didn't notice this before, but why do you postpone the
> decrement only for queues not in root group?  If I'm not missing
> anything, the active (i.e., with pending reqs) state of the root group
> is to be computed as that of ay other group.

Hi, Paolo

I thought that if the queue is in the root group, then
bfqq->entity.parent is NULL, and that case is handled above, separately
from the previous implementation for queues that are not in the root
group.

Is this the wrong way to handle the root group?

Thanks,
Kuai


* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-20  9:20     ` yukuai (C)
@ 2021-10-20  9:29       ` Paolo Valente
  2021-10-20  9:38         ` yukuai (C)
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Valente @ 2021-10-20  9:29 UTC (permalink / raw)
  To: yukuai (C); +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang



> On 20 Oct 2021, at 11:20, yukuai (C) <yukuai3@huawei.com> wrote:
> 
> On 2021/10/20 16:51, Paolo Valente wrote:
> 
>>> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>> 			     struct bfq_queue *bfqq)
>>> {
>>> 	struct bfq_entity *entity = bfqq->entity.parent;
>>> +	struct bfq_sched_data *sd;
>>> +
>>> +	/*
>>> +	 * If the bfq queue is in root group, the decrement of
>>> +	 * num_groups_with_pending_reqs is performed immediately upon the
>>> +	 * deactivation of entity.
>>> +	 */
>>> +	if (!entity) {
>>> +		entity = &bfqd->root_group->entity;
>>> +		sd = entity->my_sched_data;
>>> +
>>> +		if (!sd->in_service_entity)
>>> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
>>> +
>>> +		return;
>>> +	}
>>> 
>>> 	for_each_entity(entity) {
>>> -		struct bfq_sched_data *sd = entity->my_sched_data;
>>> +		sd = entity->my_sched_data;
>>> 
>>> 		if (sd->next_in_service || sd->in_service_entity) {
>>> 			/*
>>> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>> 		}
>>> 
>>> 		/*
>>> -		 * The decrement of num_groups_with_pending_reqs is
>>> +		 * If the bfq queue is not in root group,
>>> +		 * the decrement of num_groups_with_pending_reqs is
>> I'm sorry if I didn't notice this before, but why do you postpone the
>> decrement only for queues not in root group?  If I'm not missing
>> anything, the active (i.e., with pending reqs) state of the root group
>> is to be computed as that of ay other group.
> 
> Hi, Paolo
> 
> I thought if queue is in root group, then bfqq->entity.parent is NULL,
> and such case is handled above, which is separate from previous
> implementation for queues that are not in root group.
> 
> Is this the wrong way to handle root group?
> 

I think that, if we want to also count the root group among the active
ones, then the logic for tagging the root group as active must be the
same as for the other groups. Or am I missing something?

Thanks,
Paolo

> Thanks,
> Kuai



* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-20  9:29       ` Paolo Valente
@ 2021-10-20  9:38         ` yukuai (C)
  2021-10-20  9:43           ` Paolo Valente
  0 siblings, 1 reply; 9+ messages in thread
From: yukuai (C) @ 2021-10-20  9:38 UTC (permalink / raw)
  To: Paolo Valente; +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang

On 2021/10/20 17:29, Paolo Valente wrote:
> 
> 
>> On 20 Oct 2021, at 11:20, yukuai (C) <yukuai3@huawei.com> wrote:
>>
>> On 2021/10/20 16:51, Paolo Valente wrote:
>>
>>>> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>> 			     struct bfq_queue *bfqq)
>>>> {
>>>> 	struct bfq_entity *entity = bfqq->entity.parent;
>>>> +	struct bfq_sched_data *sd;
>>>> +
>>>> +	/*
>>>> +	 * If the bfq queue is in root group, the decrement of
>>>> +	 * num_groups_with_pending_reqs is performed immediately upon the
>>>> +	 * deactivation of entity.
>>>> +	 */
>>>> +	if (!entity) {
>>>> +		entity = &bfqd->root_group->entity;
>>>> +		sd = entity->my_sched_data;
>>>> +
>>>> +		if (!sd->in_service_entity)
>>>> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
>>>> +
>>>> +		return;
>>>> +	}
>>>>
>>>> 	for_each_entity(entity) {
>>>> -		struct bfq_sched_data *sd = entity->my_sched_data;
>>>> +		sd = entity->my_sched_data;
>>>>
>>>> 		if (sd->next_in_service || sd->in_service_entity) {
>>>> 			/*
>>>> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>> 		}
>>>>
>>>> 		/*
>>>> -		 * The decrement of num_groups_with_pending_reqs is
>>>> +		 * If the bfq queue is not in root group,
>>>> +		 * the decrement of num_groups_with_pending_reqs is
>>> I'm sorry if I didn't notice this before, but why do you postpone the
>>> decrement only for queues not in root group?  If I'm not missing
>>> anything, the active (i.e., with pending reqs) state of the root group
>>> is to be computed as that of ay other group.
>>
>> Hi, Paolo
>>
>> I thought if queue is in root group, then bfqq->entity.parent is NULL,
>> and such case is handled above, which is separate from previous
>> implementation for queues that are not in root group.
>>
>> Is this the wrong way to handle root group?
>>
> 
> I think that, if we want to count also the root group among the active
> ones, then the logic for tagging the root group as active must be the
> same as the other groups. Or am I missing something?

Hi, Paolo

Currently, if the queue is in the root group, bfqq->entity.parent is
NULL, and this makes it hard to keep the same logic.

Can we store root_group->my_entity in bfqq->entity.parent if the queue
is in the root group?

Thanks,
Kuai



* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-20  9:38         ` yukuai (C)
@ 2021-10-20  9:43           ` Paolo Valente
  2021-10-20 11:53             ` yukuai (C)
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Valente @ 2021-10-20  9:43 UTC (permalink / raw)
  To: yukuai (C); +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang



> On 20 Oct 2021, at 11:38, yukuai (C) <yukuai3@huawei.com> wrote:
> 
> On 2021/10/20 17:29, Paolo Valente wrote:
>>> On 20 Oct 2021, at 11:20, yukuai (C) <yukuai3@huawei.com> wrote:
>>> 
>>> On 2021/10/20 16:51, Paolo Valente wrote:
>>> 
>>>>> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>>> 			     struct bfq_queue *bfqq)
>>>>> {
>>>>> 	struct bfq_entity *entity = bfqq->entity.parent;
>>>>> +	struct bfq_sched_data *sd;
>>>>> +
>>>>> +	/*
>>>>> +	 * If the bfq queue is in root group, the decrement of
>>>>> +	 * num_groups_with_pending_reqs is performed immediately upon the
>>>>> +	 * deactivation of entity.
>>>>> +	 */
>>>>> +	if (!entity) {
>>>>> +		entity = &bfqd->root_group->entity;
>>>>> +		sd = entity->my_sched_data;
>>>>> +
>>>>> +		if (!sd->in_service_entity)
>>>>> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
>>>>> +
>>>>> +		return;
>>>>> +	}
>>>>> 
>>>>> 	for_each_entity(entity) {
>>>>> -		struct bfq_sched_data *sd = entity->my_sched_data;
>>>>> +		sd = entity->my_sched_data;
>>>>> 
>>>>> 		if (sd->next_in_service || sd->in_service_entity) {
>>>>> 			/*
>>>>> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>>> 		}
>>>>> 
>>>>> 		/*
>>>>> -		 * The decrement of num_groups_with_pending_reqs is
>>>>> +		 * If the bfq queue is not in root group,
>>>>> +		 * the decrement of num_groups_with_pending_reqs is
>>>> I'm sorry if I didn't notice this before, but why do you postpone the
>>>> decrement only for queues not in root group?  If I'm not missing
>>>> anything, the active (i.e., with pending reqs) state of the root group
>>>> is to be computed as that of ay other group.
>>> 
>>> Hi, Paolo
>>> 
>>> I thought if queue is in root group, then bfqq->entity.parent is NULL,
>>> and such case is handled above, which is separate from previous
>>> implementation for queues that are not in root group.
>>> 
>>> Is this the wrong way to handle root group?
>>> 
>> I think that, if we want to count also the root group among the active
>> ones, then the logic for tagging the root group as active must be the
>> same as the other groups. Or am I missing something?
> 
> Hi, Paolo
> 
> Currently, if queue is in root group, bfqq->entity.parent is NULL, and
> this makes it hard to keep the same logic.
> 
> Can we store root_group->my_entity to bfqq->entity.parent if the queue
> is in root group?
> 

Any sensible implementation is ok for me.  Usually, stuff for root
group is in the bfqd.

Thanks,
Paolo

> Thanks,
> Kuai



* Re: [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs'
  2021-10-20  9:43           ` Paolo Valente
@ 2021-10-20 11:53             ` yukuai (C)
  0 siblings, 0 replies; 9+ messages in thread
From: yukuai (C) @ 2021-10-20 11:53 UTC (permalink / raw)
  To: Paolo Valente; +Cc: Jens Axboe, linux-block, linux-kernel, yi.zhang

On 2021/10/20 17:43, Paolo Valente wrote:
> 
> 
>> On 20 Oct 2021, at 11:38, yukuai (C) <yukuai3@huawei.com> wrote:
>>
>> On 2021/10/20 17:29, Paolo Valente wrote:
>>>> On 20 Oct 2021, at 11:20, yukuai (C) <yukuai3@huawei.com> wrote:
>>>>
>>>> On 2021/10/20 16:51, Paolo Valente wrote:
>>>>
>>>>>> @@ -860,9 +870,25 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>>>> 			     struct bfq_queue *bfqq)
>>>>>> {
>>>>>> 	struct bfq_entity *entity = bfqq->entity.parent;
>>>>>> +	struct bfq_sched_data *sd;
>>>>>> +
>>>>>> +	/*
>>>>>> +	 * If the bfq queue is in root group, the decrement of
>>>>>> +	 * num_groups_with_pending_reqs is performed immediately upon the
>>>>>> +	 * deactivation of entity.
>>>>>> +	 */
>>>>>> +	if (!entity) {
>>>>>> +		entity = &bfqd->root_group->entity;
>>>>>> +		sd = entity->my_sched_data;
>>>>>> +
>>>>>> +		if (!sd->in_service_entity)
>>>>>> +			bfq_clear_group_with_pending_reqs(bfqd, entity);
>>>>>> +
>>>>>> +		return;
>>>>>> +	}
>>>>>>
>>>>>> 	for_each_entity(entity) {
>>>>>> -		struct bfq_sched_data *sd = entity->my_sched_data;
>>>>>> +		sd = entity->my_sched_data;
>>>>>>
>>>>>> 		if (sd->next_in_service || sd->in_service_entity) {
>>>>>> 			/*
>>>>>> @@ -880,7 +906,8 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
>>>>>> 		}
>>>>>>
>>>>>> 		/*
>>>>>> -		 * The decrement of num_groups_with_pending_reqs is
>>>>>> +		 * If the bfq queue is not in root group,
>>>>>> +		 * the decrement of num_groups_with_pending_reqs is
>>>>> I'm sorry if I didn't notice this before, but why do you postpone the
>>>>> decrement only for queues not in root group?  If I'm not missing
>>>>> anything, the active (i.e., with pending reqs) state of the root group
>>>>> is to be computed as that of ay other group.
>>>>
>>>> Hi, Paolo
>>>>
>>>> I thought if queue is in root group, then bfqq->entity.parent is NULL,
>>>> and such case is handled above, which is separate from previous
>>>> implementation for queues that are not in root group.
>>>>
>>>> Is this the wrong way to handle root group?
>>>>
>>> I think that, if we want to count also the root group among the active
>>> ones, then the logic for tagging the root group as active must be the
>>> same as the other groups. Or am I missing something?
>>
>> Hi, Paolo
>>
>> Currently, if queue is in root group, bfqq->entity.parent is NULL, and
>> this makes it hard to keep the same logic.
>>
>> Can we store root_group->my_entity to bfqq->entity.parent if the queue
>> is in root group?
>>
> 
> Any sensible implementation is ok for me.  Usually, stuff for root
> group is in the bfqd.
> 

I'll try to implement it that way.
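
Roughly, the idea would look like the stand-alone sketch below. This is
only a model, not a patch: the toy_* structures are reduced stand-ins
for the real bfq structs, the attach helper is hypothetical, and where
exactly the parent would be assigned in bfq is still to be decided.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_entity {
        struct toy_entity *parent;
        bool in_groups_with_pending_reqs;
};

struct toy_bfqd {
        struct toy_entity root_group_entity;
        unsigned int num_groups_with_pending_reqs;
};

struct toy_bfqq {
        struct toy_entity entity;
};

/* Hypothetical init step: a root-group queue gets a non-NULL parent. */
static void toy_attach_to_root(struct toy_bfqd *bfqd, struct toy_bfqq *bfqq)
{
        if (!bfqq->entity.parent)
                bfqq->entity.parent = &bfqd->root_group_entity;
}

/* With that, one upward walk tags root and non-root groups the same way. */
static void toy_mark_groups_with_pending_reqs(struct toy_bfqd *bfqd,
                                              struct toy_bfqq *bfqq)
{
        struct toy_entity *entity;

        for (entity = bfqq->entity.parent; entity; entity = entity->parent) {
                if (!entity->in_groups_with_pending_reqs) {
                        entity->in_groups_with_pending_reqs = true;
                        bfqd->num_groups_with_pending_reqs++;
                }
        }
}

int main(void)
{
        struct toy_bfqd bfqd = { .num_groups_with_pending_reqs = 0 };
        struct toy_bfqq bfqq = { .entity = { .parent = NULL } };

        toy_attach_to_root(&bfqd, &bfqq);
        toy_mark_groups_with_pending_reqs(&bfqd, &bfqq);
        assert(bfqd.num_groups_with_pending_reqs == 1);

        return 0;
}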

Thanks,
Kuai

> Thanks,
> Paolo
> 
>> Thanks,
>> Kuai
> 
> .
> 


Thread overview: 9+ messages
2021-10-14  1:45 [PATCH v4 0/2] optimize the bfq queue idle judgment Yu Kuai
2021-10-14  1:45 ` [PATCH v4 1/2] block, bfq: counted root group into 'num_groups_with_pending_reqs' Yu Kuai
2021-10-20  8:51   ` Paolo Valente
2021-10-20  9:20     ` yukuai (C)
2021-10-20  9:29       ` Paolo Valente
2021-10-20  9:38         ` yukuai (C)
2021-10-20  9:43           ` Paolo Valente
2021-10-20 11:53             ` yukuai (C)
2021-10-14  1:45 ` [PATCH v4 2/2] block, bfq: do not idle if only one cgroup is activated Yu Kuai
