From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jasvinder Singh
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, Abraham Tovar, Lukasz Krakowiak
Date: Tue, 25 Jun 2019 16:32:08 +0100
Message-Id: <20190625153217.24301-20-jasvinder.singh@intel.com>
X-Mailer: git-send-email 2.21.0
In-Reply-To: <20190625153217.24301-1-jasvinder.singh@intel.com>
References: <20190528120553.2992-2-lukaszx.krakowiak@intel.com> <20190625153217.24301-1-jasvinder.singh@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v2 19/28] sched: update grinder schedule function
List-Id: DPDK patches and discussions

Update the grinder schedule function to allow configuration flexibility
for the pipe traffic classes and queues, and to support subport-level
configuration of the pipe parameters.

Signed-off-by: Jasvinder Singh
Signed-off-by: Abraham Tovar
Signed-off-by: Lukasz Krakowiak
---
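Illustrative note (not part of the diff): with this change the WRR token update in
grinder_schedule() becomes a no-op for the strict-priority traffic classes, because the
token increment is multiplied by be_tc_active, which is 1 only for the best-effort TC.
A minimal standalone sketch of that branchless pattern is below; the toy_grinder struct
and the TRAFFIC_CLASS_BE / QUEUES_PER_PIPE values are simplified stand-ins for the real
rte_sched_grinder fields and RTE_SCHED_* constants, not the library API.

/* Simplified stand-ins for the grinder fields touched by grinder_schedule(). */
#include <stdint.h>
#include <stdio.h>

#define TRAFFIC_CLASS_BE  12   /* stand-in for RTE_SCHED_TRAFFIC_CLASS_BE */
#define QUEUES_PER_PIPE   4    /* stand-in queue count, illustration only */

struct toy_grinder {
	uint32_t tc_index;                    /* TC of the packet being sent */
	uint32_t wrr_tokens[QUEUES_PER_PIPE]; /* WRR state, best-effort TC only */
	uint32_t wrr_cost[QUEUES_PER_PIPE];
};

/* Branchless WRR token update: multiplying by be_tc_active (0 or 1) makes the
 * update a no-op for strict-priority TCs, so no branch is needed in the hot path. */
static void
toy_wrr_update(struct toy_grinder *g, uint32_t qpos, uint32_t pkt_len)
{
	int be_tc_active = (g->tc_index == TRAFFIC_CLASS_BE);

	g->wrr_tokens[qpos] += pkt_len * g->wrr_cost[qpos] * be_tc_active;
}

int main(void)
{
	struct toy_grinder g = { .tc_index = TRAFFIC_CLASS_BE,
				 .wrr_cost = { 1, 2, 4, 8 } };

	toy_wrr_update(&g, 1, 64);
	printf("wrr_tokens[1] = %u\n", (unsigned)g.wrr_tokens[1]); /* prints 128 */
	return 0;
}

The multiply-by-flag form keeps a conditional out of the per-packet path while still
confining WRR state to the best-effort traffic class.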
 lib/librte_sched/rte_sched.c | 82 ++++++++++++++++++++++--------------
 1 file changed, 51 insertions(+), 31 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 607fe6c18..f468827f4 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -2096,14 +2096,14 @@ grinder_credits_update(struct rte_sched_port *port,
 #ifndef RTE_SCHED_SUBPORT_TC_OV
 
 static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+grinder_credits_check(struct rte_sched_subport *subport,
+	uint32_t pos, uint32_t frame_overhead)
 {
-	struct rte_sched_grinder *grinder = port->grinder + pos;
-	struct rte_sched_subport *subport = grinder->subport;
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
 	struct rte_sched_pipe *pipe = grinder->pipe;
 	struct rte_mbuf *pkt = grinder->pkt;
 	uint32_t tc_index = grinder->tc_index;
-	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+	uint32_t pkt_len = pkt->pkt_len + frame_overhead;
 	uint32_t subport_tb_credits = subport->tb_credits;
 	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
 	uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -2119,7 +2119,7 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 	if (!enough_credits)
 		return 0;
 
-	/* Update port credits */
+	/* Update subport credits */
 	subport->tb_credits -= pkt_len;
 	subport->tc_credits[tc_index] -= pkt_len;
 	pipe->tb_credits -= pkt_len;
@@ -2131,23 +2131,30 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 #else
 
 static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+grinder_credits_check(struct rte_sched_subport *subport,
+	uint32_t pos, uint32_t frame_overhead)
 {
-	struct rte_sched_grinder *grinder = port->grinder + pos;
-	struct rte_sched_subport *subport = grinder->subport;
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
 	struct rte_sched_pipe *pipe = grinder->pipe;
 	struct rte_mbuf *pkt = grinder->pkt;
 	uint32_t tc_index = grinder->tc_index;
-	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+	uint32_t pkt_len = pkt->pkt_len + frame_overhead;
 	uint32_t subport_tb_credits = subport->tb_credits;
 	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
 	uint32_t pipe_tb_credits = pipe->tb_credits;
 	uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
-	uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
-	uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
-	uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
+	uint32_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	uint32_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
+	uint32_t pipe_tc_ov_credits, i;
 	int enough_credits;
 
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		pipe_tc_ov_mask1[i] = UINT32_MAX;
+
+	pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
+	pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = UINT32_MAX;
+	pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
+
 	/* Check pipe and subport credits */
 	enough_credits = (pkt_len <= subport_tb_credits) &&
 		(pkt_len <= subport_tc_credits) &&
@@ -2170,36 +2177,48 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 
 #endif /* RTE_SCHED_SUBPORT_TC_OV */
 
-
 static inline int
-grinder_schedule(struct rte_sched_port *port, uint32_t pos)
+grinder_schedule(struct rte_sched_port *port,
+	struct rte_sched_subport *subport, uint32_t pos)
 {
-	struct rte_sched_grinder *grinder = port->grinder + pos;
-	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
 	struct rte_mbuf *pkt = grinder->pkt;
-	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+	struct rte_sched_queue *queue;
+	uint32_t frame_overhead = port->frame_overhead;
+	uint32_t qpos, pkt_len;
+	int be_tc_active;
 
-	if (!grinder_credits_check(port, pos))
+	if (!grinder_credits_check(subport, pos, frame_overhead))
 		return 0;
 
+	pkt_len = pkt->pkt_len + frame_overhead;
+	qpos = grinder->qpos;
+	queue = grinder->queue[qpos];
+
 	/* Advance port time */
 	port->time += pkt_len;
 
 	/* Send packet */
 	port->pkts_out[port->n_pkts_out++] = pkt;
 	queue->qr++;
-	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
+
+	be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
+	grinder->wrr_tokens[qpos] +=
+		pkt_len * grinder->wrr_cost[qpos] * be_tc_active;
+
 	if (queue->qr == queue->qw) {
-		uint32_t qindex = grinder->qindex[grinder->qpos];
+		uint32_t qindex = grinder->qindex[qpos];
+
+		rte_bitmap_clear(subport->bmp, qindex);
+		grinder->qmask &= ~(1 << qpos);
+		if (be_tc_active)
+			grinder->wrr_mask[qpos] = 0;
 
-		rte_bitmap_clear(port->bmp, qindex);
-		grinder->qmask &= ~(1 << grinder->qpos);
-		grinder->wrr_mask[grinder->qpos] = 0;
-		rte_sched_port_set_queue_empty_timestamp(port, port->subport, qindex);
+		rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
 	}
 
 	/* Reset pipe loop detection */
-	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+	subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
 	grinder->productive = 1;
 
 	return 1;
@@ -2585,14 +2604,15 @@ grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
 static inline uint32_t
 grinder_handle(struct rte_sched_port *port, uint32_t pos)
 {
-	struct rte_sched_grinder *grinder = port->grinder + pos;
+	struct rte_sched_subport *subport = port->subport;
+	struct rte_sched_grinder *grinder = subport->grinder + pos;
 
 	switch (grinder->state) {
 	case e_GRINDER_PREFETCH_PIPE:
 	{
-		if (grinder_next_pipe(port->subport, pos)) {
-			grinder_prefetch_pipe(port->subport, pos);
-			port->busy_grinders++;
+		if (grinder_next_pipe(subport, pos)) {
+			grinder_prefetch_pipe(subport, pos);
+			subport->busy_grinders++;
 
 			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
 			return 0;
@@ -2615,7 +2635,7 @@ grinder_handle(struct rte_sched_port *port, uint32_t pos)
 
 	case e_GRINDER_PREFETCH_MBUF:
 	{
-		grinder_prefetch_mbuf(port->subport, pos);
+		grinder_prefetch_mbuf(subport, pos);
 
 		grinder->state = e_GRINDER_READ_MBUF;
 		return 0;
@@ -2625,7 +2645,7 @@ grinder_handle(struct rte_sched_port *port, uint32_t pos)
 	{
 		uint32_t result = 0;
 
-		result = grinder_schedule(port, pos);
+		result = grinder_schedule(port, subport, pos);
 
 		/* Look for next packet within the same TC */
 		if (result && grinder->qmask) {
-- 
2.21.0