From: Guangbin Huang <huangguangbin2@huawei.com>
To: <davem@davemloft.net>, <kuba@kernel.org>, <jiri@nvidia.com>
Cc: <netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<lipeng321@huawei.com>, <chenhao288@hisilicon.com>,
<huangguangbin2@huawei.com>
Subject: [PATCH V2 net-next 9/9] net: hns3: add support for VF setting rx/tx buffer size by devlink param
Date: Thu, 15 Jul 2021 15:45:10 +0800 [thread overview]
Message-ID: <1626335110-50769-10-git-send-email-huangguangbin2@huawei.com> (raw)
In-Reply-To: <1626335110-50769-1-git-send-email-huangguangbin2@huawei.com>
From: Hao Chen <chenhao288@hisilicon.com>
Add support for the VF to configure its rx/tx buffer sizes via the devlink
driverinit parameters "rx_buffer_len" and "tx_buffer_size". The rx buffer
length is restricted to 2048 or 4096 bytes by a validate callback, and the
configured values are read back from devlink during driver initialization
and applied to the VF's TQP buffer sizes.
Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
---
.../hisilicon/hns3/hns3vf/hclgevf_devlink.c | 88 +++++++++++++++++++++-
.../hisilicon/hns3/hns3vf/hclgevf_devlink.h | 7 ++
.../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3 +
3 files changed, 97 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index bce598913dc3..4c364055e464 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -34,6 +34,37 @@ static int hclgevf_devlink_info_get(struct devlink *devlink,
version_str);
}
+static void hclgevf_devlink_get_param_setting(struct devlink *devlink)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct pci_dev *pdev = hdev->pdev;
+ union devlink_param_value val;
+ int i, ret;
+
+ ret = devlink_param_driverinit_value_get(devlink,
+ HCLGEVF_DEVLINK_PARAM_ID_RX_BUF_LEN,
+ &val);
+ if (!ret) {
+ hdev->rx_buf_len = val.vu32;
+ hdev->nic.kinfo.rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < hdev->num_tqps; i++)
+ hdev->htqp[i].q.buf_size = hdev->rx_buf_len;
+ } else {
+ dev_err(&pdev->dev,
+ "failed to get rx buffer size, ret = %d\n", ret);
+ }
+
+ ret = devlink_param_driverinit_value_get(devlink,
+ HCLGEVF_DEVLINK_PARAM_ID_TX_BUF_SIZE,
+ &val);
+ if (!ret)
+ hdev->nic.kinfo.devlink_tx_spare_buf_size = val.vu32;
+ else
+ dev_err(&pdev->dev,
+ "failed to get tx buffer size, ret = %d\n", ret);
+}
+
static int hclgevf_devlink_reload_down(struct devlink *devlink,
bool netns_change,
enum devlink_reload_action action,
@@ -106,6 +137,49 @@ static const struct devlink_ops hclgevf_devlink_ops = {
.reload_up = hclgevf_devlink_reload_up,
};
+static int
+hclgevf_devlink_rx_buffer_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGEVF_RX_BUF_LEN_2K 2048
+#define HCLGEVF_RX_BUF_LEN_4K 4096
+
+ if (val.vu32 != HCLGEVF_RX_BUF_LEN_2K &&
+ val.vu32 != HCLGEVF_RX_BUF_LEN_4K) {
+ NL_SET_ERR_MSG_MOD(extack, "Supported size is 2048 or 4096");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param hclgevf_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(HCLGEVF_DEVLINK_PARAM_ID_RX_BUF_LEN,
+ "rx_buffer_len", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ hclgevf_devlink_rx_buffer_size_validate),
+ DEVLINK_PARAM_DRIVER(HCLGEVF_DEVLINK_PARAM_ID_TX_BUF_SIZE,
+ "tx_buffer_size", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+void hclgevf_devlink_set_params_init_values(struct hclgevf_dev *hdev)
+{
+ union devlink_param_value value;
+
+ value.vu32 = hdev->rx_buf_len;
+ devlink_param_driverinit_value_set(hdev->devlink,
+ HCLGEVF_DEVLINK_PARAM_ID_RX_BUF_LEN,
+ value);
+ value.vu32 = 0;
+ devlink_param_driverinit_value_set(hdev->devlink,
+ HCLGEVF_DEVLINK_PARAM_ID_TX_BUF_SIZE,
+ value);
+}
+
int hclgevf_devlink_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -130,10 +204,20 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
hdev->devlink = devlink;
+ ret = devlink_params_register(devlink, hclgevf_devlink_params,
+ ARRAY_SIZE(hclgevf_devlink_params));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register devlink params, ret = %d\n", ret);
+ goto out_param_reg_fail;
+ }
+
devlink_reload_enable(devlink);
return 0;
-
+out_param_reg_fail:
+ hdev->devlink = NULL;
+ devlink_unregister(devlink);
out_reg_fail:
devlink_free(devlink);
return ret;
@@ -148,6 +232,8 @@ void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
devlink_reload_disable(devlink);
+ devlink_params_unregister(devlink, hclgevf_devlink_params,
+ ARRAY_SIZE(hclgevf_devlink_params));
devlink_unregister(devlink);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
index e09ea3d8a963..2159ec4a3523 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
@@ -6,10 +6,17 @@
#include "hclgevf_main.h"
+enum hclgevf_devlink_param_id {
+ HCLGEVF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ HCLGEVF_DEVLINK_PARAM_ID_RX_BUF_LEN,
+ HCLGEVF_DEVLINK_PARAM_ID_TX_BUF_SIZE,
+};
+
struct hclgevf_devlink_priv {
struct hclgevf_dev *hdev;
};
+void hclgevf_devlink_set_params_init_values(struct hclgevf_dev *hdev);
int hclgevf_devlink_init(struct hclgevf_dev *hdev);
void hclgevf_devlink_uninit(struct hclgevf_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 1e03c4d16125..ce7d652594e1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3374,6 +3374,9 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hclgevf_devlink_set_params_init_values(hdev);
+ devlink_params_publish(hdev->devlink);
+
ret = hclgevf_alloc_tqps(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
--
2.8.1
prev parent reply other threads:[~2021-07-15 7:48 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-15 7:45 [PATCH V2 net-next 0/9] net: hns3: add support devlink Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 1/9] devlink: add documentation for hns3 driver Guangbin Huang
2021-07-16 6:00 ` Jakub Kicinski
2021-07-19 1:21 ` huangguangbin (A)
2021-07-15 7:45 ` [PATCH V2 net-next 2/9] net: hns3: add support for registering devlink for PF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 3/9] net: hns3: add support for registering devlink for VF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 4/9] net: hns3: add support for devlink get info for PF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 5/9] net: hns3: add support for devlink get info for VF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 6/9] net: hns3: add devlink reload support for PF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 7/9] net: hns3: add devlink reload support for VF Guangbin Huang
2021-07-15 7:45 ` [PATCH V2 net-next 8/9] net: hns3: add support for PF setting rx/tx buffer size by devlink param Guangbin Huang
2021-07-15 7:45 ` Guangbin Huang [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1626335110-50769-10-git-send-email-huangguangbin2@huawei.com \
--to=huangguangbin2@huawei.com \
--cc=chenhao288@hisilicon.com \
--cc=davem@davemloft.net \
--cc=jiri@nvidia.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lipeng321@huawei.com \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).