From: Antoine Tenart <atenart@kernel.org>
To: davem@davemloft.net, kuba@kernel.org, alexander.duyck@gmail.com
Cc: Antoine Tenart <atenart@kernel.org>, netdev@vger.kernel.org
Subject: [PATCH net-next v2 12/12] net-sysfs: move the xps cpus/rxqs retrieval in a common function
Date: Mon, 8 Feb 2021 18:19:17 +0100
Message-Id: <20210208171917.1088230-13-atenart@kernel.org>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20210208171917.1088230-1-atenart@kernel.org>
References: <20210208171917.1088230-1-atenart@kernel.org>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Most of the xps_cpus_show and xps_rxqs_show functions share the same
logic. Having it in two different functions does not help maintenance.
This patch moves their common logic into a new function,
xps_queue_show, to improve this.

Signed-off-by: Antoine Tenart <atenart@kernel.org>
---
 net/core/net-sysfs.c | 98 ++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 67 deletions(-)

diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 6ce5772e799e..984c15248483 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1314,35 +1314,31 @@ static const struct attribute_group dql_group = {
 #endif /* CONFIG_BQL */
 
 #ifdef CONFIG_XPS
-static ssize_t xps_cpus_show(struct netdev_queue *queue,
-                             char *buf)
+static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
+                              char *buf, enum xps_map_type type)
 {
-        struct net_device *dev = queue->dev;
         struct xps_dev_maps *dev_maps;
-        unsigned int index, nr_ids;
-        int j, len, ret, tc = 0;
         unsigned long *mask;
-
-        if (!netif_is_multiqueue(dev))
-                return -ENOENT;
-
-        index = get_netdev_queue_index(queue);
-
-        /* If queue belongs to subordinate dev use its map */
-        dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+        unsigned int nr_ids;
+        int j, len, tc = 0;
 
         tc = netdev_txq_to_tc(dev, index);
         if (tc < 0)
                 return -EINVAL;
 
         rcu_read_lock();
-        dev_maps = rcu_dereference(dev->xps_maps[XPS_CPUS]);
-        nr_ids = dev_maps ? dev_maps->nr_ids : nr_cpu_ids;
+        dev_maps = rcu_dereference(dev->xps_maps[type]);
+
+        /* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0
+         * when dev_maps hasn't been allocated yet, to be backward compatible.
+         */
+        nr_ids = dev_maps ? dev_maps->nr_ids :
+                 (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);
 
         mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
         if (!mask) {
-                ret = -ENOMEM;
-                goto err_rcu_unlock;
+                rcu_read_unlock();
+                return -ENOMEM;
         }
 
         if (!dev_maps || tc >= dev_maps->num_tc)
@@ -1368,11 +1364,24 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 
         len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
         bitmap_free(mask);
+        return len < PAGE_SIZE ? len : -EINVAL;
+}
 
-err_rcu_unlock:
-        rcu_read_unlock();
-        return ret;
+static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
+{
+        struct net_device *dev = queue->dev;
+        unsigned int index;
+
+        if (!netif_is_multiqueue(dev))
+                return -ENOENT;
+
+        index = get_netdev_queue_index(queue);
+
+        /* If queue belongs to subordinate dev use its map */
+        dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
+        return xps_queue_show(dev, index, buf, XPS_CPUS);
 }
 
 static ssize_t xps_cpus_store(struct netdev_queue *queue,
@@ -1419,56 +1428,11 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
 static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 {
         struct net_device *dev = queue->dev;
-        struct xps_dev_maps *dev_maps;
-        unsigned int index, nr_ids;
-        int j, len, ret, tc = 0;
-        unsigned long *mask;
+        unsigned int index;
 
         index = get_netdev_queue_index(queue);
 
-        tc = netdev_txq_to_tc(dev, index);
-        if (tc < 0)
-                return -EINVAL;
-
-        rcu_read_lock();
-        dev_maps = rcu_dereference(dev->xps_maps[XPS_RXQS]);
-        nr_ids = dev_maps ? dev_maps->nr_ids : dev->num_rx_queues;
-
-        mask = bitmap_zalloc(nr_ids, GFP_KERNEL);
-        if (!mask) {
-                ret = -ENOMEM;
-                goto err_rcu_unlock;
-        }
-
-        if (!dev_maps || tc >= dev_maps->num_tc)
-                goto out_no_maps;
-
-        for (j = 0; j < nr_ids; j++) {
-                int i, tci = j * dev_maps->num_tc + tc;
-                struct xps_map *map;
-
-                map = rcu_dereference(dev_maps->attr_map[tci]);
-                if (!map)
-                        continue;
-
-                for (i = map->len; i--;) {
-                        if (map->queues[i] == index) {
-                                set_bit(j, mask);
-                                break;
-                        }
-                }
-        }
-out_no_maps:
-        rcu_read_unlock();
-
-        len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
-        bitmap_free(mask);
-
-        return len < PAGE_SIZE ? len : -EINVAL;
-
-err_rcu_unlock:
-        rcu_read_unlock();
-        return ret;
+        return xps_queue_show(dev, index, buf, XPS_RXQS);
 }
 
 static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
-- 
2.29.2
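
For readers skimming the diff, the net effect is that both sysfs show handlers
become thin wrappers around the new xps_queue_show() helper, which performs the
shared work under RCU (map lookup, bitmap walk, bitmap_print_to_pagebuf). The
following is a condensed sketch of the resulting call structure, taken from the
diff above and not part of the patch itself:

static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf)
{
        struct net_device *dev = queue->dev;
        unsigned int index;

        if (!netif_is_multiqueue(dev))
                return -ENOENT;

        index = get_netdev_queue_index(queue);

        /* If queue belongs to subordinate dev use its map */
        dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

        /* shared helper walks the CPU map and prints the bitmap */
        return xps_queue_show(dev, index, buf, XPS_CPUS);
}

static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
{
        struct net_device *dev = queue->dev;
        unsigned int index;

        index = get_netdev_queue_index(queue);

        /* same helper, but over the receive-queue map */
        return xps_queue_show(dev, index, buf, XPS_RXQS);
}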