From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1754076AbcBAWuP (ORCPT );
	Mon, 1 Feb 2016 17:50:15 -0500
Received: from spostino.sms.unimo.it ([155.185.44.3]:56042 "EHLO
	spostino.sms.unimo.it" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1753839AbcBAWuL (ORCPT );
	Mon, 1 Feb 2016 17:50:11 -0500
From: Paolo Valente
To: Jens Axboe, Tejun Heo
Cc: Fabio Checconi, Arianna Avanzini, linux-block@vger.kernel.org,
	linux-kernel@vger.kernel.org, ulf.hansson@linaro.org,
	linus.walleij@linaro.org, broonie@kernel.org, Paolo Valente
Subject: [PATCH RFC 06/22] block, cfq: get rid of queue preemption
Date: Mon, 1 Feb 2016 23:12:42 +0100
Message-Id: <1454364778-25179-7-git-send-email-paolo.valente@linaro.org>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1454364778-25179-1-git-send-email-paolo.valente@linaro.org>
References: <1454364778-25179-1-git-send-email-paolo.valente@linaro.org>
UNIMORE-X-SA-Score: -2.9
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

From: Arianna Avanzini

CFQ implements a request-triggered queue preemption, based on the
priority class of the queues, and aimed at reducing latencies. There
is no such preemption in BFQ, where a low latency is guaranteed only
by the overall request-scheduling policy.

Signed-off-by: Arianna Avanzini
Signed-off-by: Paolo Valente
---
 block/cfq-iosched.c | 99 +----------------------------------------------------
 1 file changed, 1 insertion(+), 98 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 33ae8dd..48ab681 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1069,8 +1069,7 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 }
 
 /*
- * Get next queue for service. Unless we have a queue preemption,
- * we'll simply select the first cfqq in the service tree.
+ * Get next queue for service.
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
@@ -1929,93 +1928,6 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 /*
- * Check if new_cfqq should preempt the currently active queue. Return 0 for
- * no or if we aren't sure, a 1 will cause a preempt.
- */
-static bool
-cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
-		   struct request *rq)
-{
-	struct cfq_queue *cfqq;
-
-	cfqq = cfqd->active_queue;
-	if (!cfqq)
-		return false;
-
-	if (cfq_class_idle(new_cfqq))
-		return false;
-
-	if (cfq_class_idle(cfqq))
-		return true;
-
-	/*
-	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
-	 */
-	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
-		return false;
-
-	/*
-	 * if the new request is sync, but the currently running queue is
-	 * not, let the sync request have priority.
-	 */
-	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-		return true;
-
-	if (cfq_slice_used(cfqq))
-		return true;
-
-	/* Allow preemption only if we are idling on sync-noidle tree */
-	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
-	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
-	    new_cfqq->service_tree->count == 2 &&
-	    RB_EMPTY_ROOT(&cfqq->sort_list))
-		return true;
-
-	/*
-	 * So both queues are sync. Let the new request get disk time if
-	 * it's a metadata request and the current queue is doing regular IO.
-	 */
-	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
-		return true;
-
-	/*
-	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
-	 */
-	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-		return true;
-
-	/* An idle queue should not be idle now for some reason */
-	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
-		return true;
-
-	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-		return false;
-
-	return false;
-}
-
-/*
- * cfqq preempts the active queue. if we allowed preempt with no slice left,
- * let it have half of its nominal slice.
- */
-static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	cfq_log_cfqq(cfqd, cfqq, "preempt");
-	cfq_slice_expired(cfqd, 1);
-
-	/*
-	 * Put the new queue at the front of the of the current list,
-	 * so we know that it will be selected next.
-	 */
-	BUG_ON(!cfq_cfqq_on_rr(cfqq));
-
-	cfq_service_tree_add(cfqd, cfqq, 1);
-
-	cfqq->slice_end = 0;
-	cfq_mark_cfqq_slice_new(cfqq);
-}
-
-/*
  * Called when a new fs request (rq) is added (to cfqq). Check if there's
  * something we should do about it
  */
@@ -2055,15 +1967,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			} else
 				cfq_mark_cfqq_must_dispatch(cfqq);
 		}
-	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
-		/*
-		 * not the active queue - expire current slice if it is
-		 * idle and has expired it's mean thinktime or this new queue
-		 * has some old slice time left and is of higher priority or
-		 * this new queue is RT and the current one is BE
-		 */
-		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue);
 	}
 }
-- 
1.9.1
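
As a side note for readers following the series: the sketch below condenses
the priority-class logic that the removed cfq_should_preempt() applied
whenever a new request was queued against a queue other than the active one.
It is illustrative only, not kernel code; enum ioprio_class, struct queue and
should_preempt() are simplified stand-ins for the real cfq_data/cfq_queue
machinery, and the sync-noidle and REQ_PRIO special cases of the removed
function are deliberately left out.

#include <stdbool.h>
#include <stdio.h>

enum ioprio_class { CLASS_RT, CLASS_BE, CLASS_IDLE };

struct queue {
	enum ioprio_class ioclass;	/* scheduling class of the queue */
	bool sync;			/* queue issues synchronous requests */
	bool slice_used;		/* active queue has consumed its slice */
};

/* Should new_q preempt the currently active queue active_q? */
static bool should_preempt(const struct queue *active_q,
			   const struct queue *new_q, bool rq_is_sync)
{
	if (!active_q)
		return false;	/* nothing to preempt */
	if (new_q->ioclass == CLASS_IDLE)
		return false;	/* idle class never preempts */
	if (active_q->ioclass == CLASS_IDLE)
		return true;	/* anyone may preempt an idle-class queue */
	if (active_q->ioclass == CLASS_RT && new_q->ioclass != CLASS_RT)
		return false;	/* non-RT must not preempt an RT slice */
	if (rq_is_sync && !active_q->sync)
		return true;	/* sync request beats an async queue */
	if (active_q->slice_used)
		return true;	/* the active slice is over anyway */
	if (new_q->ioclass == CLASS_RT && active_q->ioclass != CLASS_RT)
		return true;	/* RT preempts a best-effort slice */
	return false;
}

int main(void)
{
	struct queue be_async = { CLASS_BE, false, false };
	struct queue rt_sync  = { CLASS_RT, true,  false };

	printf("RT sync request vs. active BE async queue: %s\n",
	       should_preempt(&be_async, &rt_sync, true) ?
	       "preempt" : "no preempt");
	printf("BE async request vs. active RT sync queue: %s\n",
	       should_preempt(&rt_sync, &be_async, false) ?
	       "preempt" : "no preempt");
	return 0;
}

Compiled with a plain C compiler, the example prints "preempt" for the first
case and "no preempt" for the second, i.e. the asymmetry between the RT and
BE classes that the removed code enforced; under BFQ the same latency goal is
pursued by the overall scheduling policy instead.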