* [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
@ 2021-04-26 18:19 Sridhar Samudrala
  2021-05-17 21:49 ` Creeley, Brett
  0 siblings, 1 reply; 5+ messages in thread
From: Sridhar Samudrala @ 2021-04-26 18:19 UTC (permalink / raw)
  To: intel-wired-lan

Introduce a devlink parameter 'num_qps_per_vf' to allow the user
to configure the maximum number of queue pairs given to SR-IOV
VFs before they are created.

This is currently determined by the driver based on the number
of SR-IOV VFs created. To keep this behavior by default, the
parameter is initialized to 0. To change the default behavior,
the user can set the num_qps_per_vf parameter via devlink, and it
will be used as the preferred value when determining the queues
and vectors assigned per VF.

USAGE:
On a 2 port NIC
 # devlink dev param show
 pci/0000:42:00.0:
   name num_qps_per_vf type driver-specific
     values:
       cmode runtime value 0
 pci/0000:42:00.1:
   name num_qps_per_vf type driver-specific
     values:
       cmode runtime value 0

 /* Set num_qps_per_vf to 4 */
 # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value 4 cmode runtime

 # devlink dev param show pci/0000:42:00.0 name num_qps_per_vf
 pci/0000:42:00.0:
   name num_qps_per_vf type driver-specific
     values:
       cmode runtime value 4

 # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs

This will create 8 VFs with 4 queue pairs and 5 vectors per VF
compared to the default behavior of 16 queue pairs and 17 vectors
per VF.
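
To go back to the default driver-determined sizing, the VFs must be
destroyed first, since the parameter cannot be changed while VFs
exist (a sketch using the same interface as above, not part of this
patch):

 # echo 0 > /sys/class/net/enp66s0f0/device/sriov_numvfs
 # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value 0 cmode runtime
 # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs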

v2:
Fixed kdoc for ice_devlink_num_qps_per_vf_validate()

Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
---
 Documentation/networking/devlink/ice.rst      |  23 ++++
 drivers/net/ethernet/intel/ice/ice_devlink.c  | 110 +++++++++++++++++-
 drivers/net/ethernet/intel/ice/ice_main.c     |   3 +
 .../net/ethernet/intel/ice/ice_virtchnl_pf.c  |   5 +-
 4 files changed, 139 insertions(+), 2 deletions(-)

diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index a432dc419fa4..38bed190fa48 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -193,3 +193,26 @@ Users can request an immediate capture of a snapshot via the
     0000000000000210 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 
     $ devlink region delete pci/0000:01:00.0/device-caps snapshot 1
+
+Parameters
+==========
+
+The ``ice`` driver implements the following driver-specific
+parameters.
+
+.. list-table:: Driver-specific parameters implemented
+   :widths: 5 5 5 85
+
+   * - Name
+     - Type
+     - Mode
+     - Description
+   * - ``num_qps_per_vf``
+     - u16
+     - runtime
+     - Number of queue pairs assigned to SR-IOV VFs. The default
+       value is 0, indicating that the driver determines the number
+       based on the number of SR-IOV VFs created and the available
+       resources. It can be set to a value between 1 and 16 and is
+       used as the preferred value when determining the queues and
+       vectors assigned per VF.
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index cf685eeea198..8bf9fd77cbef 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -479,6 +479,102 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
 	return devlink_priv(devlink);
 }
 
+enum ice_devlink_param_id {
+	ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+	ICE_DEVLINK_PARAM_ID_NUM_QPS_PER_VF,
+};
+
+/**
+ * ice_devlink_num_qps_per_vf_get - Get the current number of qps per vf
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to return the parameter value
+ *
+ * Returns: zero on success, or an error code on failure.
+ */
+static int
+ice_devlink_num_qps_per_vf_get(struct devlink *devlink, u32 id,
+			       struct devlink_param_gset_ctx *ctx)
+{
+	struct ice_pf *pf = (struct ice_pf *)devlink_priv(devlink);
+
+	if (id != ICE_DEVLINK_PARAM_ID_NUM_QPS_PER_VF)
+		return -EINVAL;
+
+	ctx->val.vu16 = pf->num_qps_per_vf;
+
+	return 0;
+}
+
+/**
+ * ice_devlink_num_qps_per_vf_validate - Validate the number of qps per vf
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to be validated
+ * @extack: netlink extended ACK structure
+ *
+ * Check that the value passed does not exceed the max qps allowed for
+ * a VF and that no VFs have been created yet.
+ * Returns: zero on success, or an error code on failure and extack with a
+ * reason for failure.
+ */
+static int
+ice_devlink_num_qps_per_vf_validate(struct devlink *devlink, u32 id,
+				    union devlink_param_value val,
+				    struct netlink_ext_ack *extack)
+{
+	struct ice_pf *pf = (struct ice_pf *)devlink_priv(devlink);
+
+	if (id != ICE_DEVLINK_PARAM_ID_NUM_QPS_PER_VF)
+		return -EINVAL;
+
+	if (val.vu16 > ICE_MAX_RSS_QS_PER_VF) {
+		NL_SET_ERR_MSG_MOD(extack, "Value is greater than max allowed");
+		return -EINVAL;
+	}
+
+	if (pf->num_alloc_vfs) {
+		NL_SET_ERR_MSG_MOD(extack, "Cannot set after VFs are created");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_devlink_num_qps_per_vf_set - Set the number of qps per vf
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context containing the parameter value to set
+ *
+ * Returns: zero on success, or an error code on failure.
+ */
+static int
+ice_devlink_num_qps_per_vf_set(struct devlink *devlink, u32 id,
+			       struct devlink_param_gset_ctx *ctx)
+{
+	struct ice_pf *pf = (struct ice_pf *)devlink_priv(devlink);
+
+	if (id != ICE_DEVLINK_PARAM_ID_NUM_QPS_PER_VF)
+		return -EINVAL;
+
+	pf->num_qps_per_vf = ctx->val.vu16;
+	pf->num_msix_per_vf = pf->num_qps_per_vf + 1;
+
+	return 0;
+}
+
+/* devlink parameters for the ice driver */
+static const struct devlink_param ice_devlink_params[] = {
+	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_NUM_QPS_PER_VF,
+			     "num_qps_per_vf",
+			     DEVLINK_PARAM_TYPE_U16,
+			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+			     ice_devlink_num_qps_per_vf_get,
+			     ice_devlink_num_qps_per_vf_set,
+			     ice_devlink_num_qps_per_vf_validate),
+};
+
 /**
  * ice_devlink_register - Register devlink interface for this PF
  * @pf: the PF to register the devlink for.
@@ -499,6 +595,13 @@ int ice_devlink_register(struct ice_pf *pf)
 		return err;
 	}
 
+	err = devlink_params_register(devlink, ice_devlink_params,
+				      ARRAY_SIZE(ice_devlink_params));
+	if (err) {
+		dev_err(dev, "devlink params registration failed: %d\n", err);
+		return err;
+	}
+
 	return 0;
 }
 
@@ -510,7 +613,11 @@ int ice_devlink_register(struct ice_pf *pf)
  */
 void ice_devlink_unregister(struct ice_pf *pf)
 {
-	devlink_unregister(priv_to_devlink(pf));
+	struct devlink *devlink = priv_to_devlink(pf);
+
+	devlink_params_unregister(devlink, ice_devlink_params,
+				  ARRAY_SIZE(ice_devlink_params));
+	devlink_unregister(devlink);
 }
 
 /**
@@ -542,6 +649,7 @@ int ice_devlink_create_port(struct ice_vsi *vsi)
 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
 	attrs.phys.port_number = pi->lport;
 	devlink_port_attrs_set(&vsi->devlink_port, &attrs);
+
 	err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
 	if (err) {
 		dev_err(dev, "devlink_port_register failed: %d\n", err);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..89f30255b711 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4092,6 +4092,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	}
 
 	ice_devlink_init_regions(pf);
+	devlink_params_publish(priv_to_devlink(pf));
 
 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
@@ -4285,6 +4286,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	devm_kfree(dev, pf->vsi);
 err_init_pf_unroll:
 	ice_deinit_pf(pf);
+	devlink_params_unpublish(priv_to_devlink(pf));
 	ice_devlink_destroy_regions(pf);
 	ice_deinit_hw(hw);
 err_exit_unroll:
@@ -4398,6 +4400,7 @@ static void ice_remove(struct pci_dev *pdev)
 		ice_vsi_free_q_vectors(pf->vsi[i]);
 	}
 	ice_deinit_pf(pf);
+	devlink_params_unpublish(priv_to_devlink(pf));
 	ice_devlink_destroy_regions(pf);
 	ice_deinit_hw(&pf->hw);
 	ice_devlink_unregister(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a1d22d2aa0bd..18538b1c57c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -637,6 +637,7 @@ void ice_free_vfs(struct ice_pf *pf)
 
 	tmp = pf->num_alloc_vfs;
 	pf->num_qps_per_vf = 0;
+	pf->num_msix_per_vf = 0;
 	pf->num_alloc_vfs = 0;
 	for (i = 0; i < tmp; i++) {
 		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
@@ -1228,7 +1229,9 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
 		pf->irq_tracker->num_entries;
 	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
-	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+	if (pf->num_msix_per_vf && msix_avail_per_vf >= pf->num_msix_per_vf) {
+		num_msix_per_vf = pf->num_msix_per_vf;
+	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
-- 
2.30.2


* [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
  2021-04-26 18:19 [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink Sridhar Samudrala
@ 2021-05-17 21:49 ` Creeley, Brett
  2021-06-23 16:21   ` Samudrala, Sridhar
  0 siblings, 1 reply; 5+ messages in thread
From: Creeley, Brett @ 2021-05-17 21:49 UTC (permalink / raw)
  To: intel-wired-lan

> -----Original Message-----
> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf Of Sridhar Samudrala
> Sent: Monday, April 26, 2021 11:20 AM
> To: intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L <anthony.l.nguyen@intel.com>; Samudrala, Sridhar
> <sridhar.samudrala@intel.com>
> Subject: [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
> 
> Introduce a devlink parameter 'num_qps_per_vf' to allow user
> to configure the maximum number of queue pairs given to SR-IOV
> VFs before they are created.
> 
> This is currently determined by the driver based on the number
> of SR-IOV VFs created. In order to keep this behavior by default
> the parameter is initialized to 0. To change the default behavior,
> user can set num_qps_per_vf parameter via devlink and this will
> be used as the preferred value to determine the queues and vectors
> assigned per VF.

What if the host administrator wants to give the VF a different number
of vectors than queues? For example, if the admin knows the VF
instance will be exercising VF RDMA and the VF needs more vectors
for RDMA traffic.

Should we have 2 separate values, i.e. "num_qps_per_vf" and
"num_msix_per_vf"?

> 
> USAGE:
> On a 2 port NIC
>  # devlink dev param show
>  pci/0000:42:00.0:
>    name num_qps_per_vf type driver-specific
>      values:
>        cmode runtime value 0
>  pci/0000:42:00.1:
>    name num_qps_per_vf type driver-specific
>      values:
>        cmode runtime value 0
> 
>  /* Set num_qps_per_vf to 4 */
>  # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value 4 cmode runtime
> 
>  # devlink dev param show pci/0000:42:00.0 name num_qps_per_vf
>  pci/0000:42:00.0:
>    name num_qps_per_vf type driver-specific
>      values:
>        cmode runtime value 4
> 
>  # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs
> 
> This will create 8 VFs with 4 queue pairs and 5 vectors per VF
> compared to the default behavior of 16 queue pairs and 17 vectors
> per VF.
> 
> v2:
> Fixed kdoc for ice_devlink_num_qps_per_vf_validate()
> 
> Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
> ---
>  Documentation/networking/devlink/ice.rst      |  23 ++++
>  drivers/net/ethernet/intel/ice/ice_devlink.c  | 110 +++++++++++++++++-
>  drivers/net/ethernet/intel/ice/ice_main.c     |   3 +
>  .../net/ethernet/intel/ice/ice_virtchnl_pf.c  |   5 +-
>  4 files changed, 139 insertions(+), 2 deletions(-)
> 

<snip>


* [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
  2021-05-17 21:49 ` Creeley, Brett
@ 2021-06-23 16:21   ` Samudrala, Sridhar
  2021-06-23 22:15     ` Singhai, Anjali
  0 siblings, 1 reply; 5+ messages in thread
From: Samudrala, Sridhar @ 2021-06-23 16:21 UTC (permalink / raw)
  To: intel-wired-lan

On 5/17/2021 2:49 PM, Creeley, Brett wrote:
>> -----Original Message-----
>> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf Of Sridhar Samudrala
>> Sent: Monday, April 26, 2021 11:20 AM
>> To: intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L <anthony.l.nguyen@intel.com>; Samudrala, Sridhar
>> <sridhar.samudrala@intel.com>
>> Subject: [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
>>
>> Introduce a devlink parameter 'num_qps_per_vf' to allow user
>> to configure the maximum number of queue pairs given to SR-IOV
>> VFs before they are created.
>>
>> This is currently determined by the driver based on the number
>> of SR-IOV VFs created. In order to keep this behavior by default
>> the parameter is initialized to 0. To change the default behavior,
>> user can set num_qps_per_vf parameter via devlink and this will
>> be used as the preferred value to determine the queues and vectors
>> assigned per VF.
> What if the host administrator wants to give the VF a different number
> of vectors than queues? For example, if the admin knows the VF
> instance will be exercising VF RDMA and the VF needs more vectors
> for RDMA traffic.
>
> Should we have 2 separate values, i.e. "num_qps_per_vf" and
> "num_msix_per_vf"?

I missed responding to this comment.
Sure. We can add num_msix_per_vf as a later patch to enable additional
vectors for RDMA.
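
A hypothetical invocation once such a parameter exists (the name and
cmode are assumptions mirroring the existing parameter, not something
this patch implements):

 # devlink dev param set pci/0000:42:00.0 name num_msix_per_vf value 9 cmode runtime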

Tony,
Can this patch be included in your series when you submit to netdev?
Or do I need to rebase it on the latest net-next?

Thanks
Sridhar
>
>> USAGE:
>> On a 2 port NIC
>>   # devlink dev param show
>>   pci/0000:42:00.0:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 0
>>   pci/0000:42:00.1:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 0
>>
>>   /* Set num_qps_per_vf to 4 */
>>   # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value 4 cmode runtime
>>
>>   # devlink dev param show pci/0000:42:00.0 name num_qps_per_vf
>>   pci/0000:42:00.0:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 4
>>
>>   # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs
>>
>> This will create 8 VFs with 4 queue pairs and 5 vectors per VF
>> compared to the default behavior of 16 queue pairs and 17 vectors
>> per VF.
>>
>> v2:
>> Fixed kdoc for ice_devlink_num_qps_per_vf_validate()
>>
>> Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
>> ---
>>   Documentation/networking/devlink/ice.rst      |  23 ++++
>>   drivers/net/ethernet/intel/ice/ice_devlink.c  | 110 +++++++++++++++++-
>>   drivers/net/ethernet/intel/ice/ice_main.c     |   3 +
>>   .../net/ethernet/intel/ice/ice_virtchnl_pf.c  |   5 +-
>>   4 files changed, 139 insertions(+), 2 deletions(-)
>>
> <snip>
>


* [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
  2021-06-23 16:21   ` Samudrala, Sridhar
@ 2021-06-23 22:15     ` Singhai, Anjali
  2021-06-23 22:43       ` Samudrala, Sridhar
  0 siblings, 1 reply; 5+ messages in thread
From: Singhai, Anjali @ 2021-06-23 22:15 UTC (permalink / raw)
  To: intel-wired-lan

Sridhar, how do you assign an asymmetric number of queues and vectors to a VF using devlink? Or does this end up as a configuration on VF representors... I hope not.

So I am guessing you can either bulk-set the number of queues for all of the VFs, or set it per SR-IOV VF port?

Thanks
Anjali

-----Original Message-----
From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf Of Samudrala, Sridhar
Sent: Wednesday, June 23, 2021 9:21 AM
To: Creeley, Brett <brett.creeley@intel.com>; intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L <anthony.l.nguyen@intel.com>
Cc: Ismail, Mustafa <mustafa.ismail@intel.com>; Saleem, Shiraz <shiraz.saleem@intel.com>
Subject: Re: [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink

On 5/17/2021 2:49 PM, Creeley, Brett wrote:
>> -----Original Message-----
>> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf 
>> Of Sridhar Samudrala
>> Sent: Monday, April 26, 2021 11:20 AM
>> To: intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L 
>> <anthony.l.nguyen@intel.com>; Samudrala, Sridhar 
>> <sridhar.samudrala@intel.com>
>> Subject: [Intel-wired-lan] [PATCH net-next v2] ice: Enable 
>> configuration of number of qps per VF via devlink
>>
>> Introduce a devlink parameter 'num_qps_per_vf' to allow user to 
>> configure the maximum number of queue pairs given to SR-IOV VFs 
>> before they are created.
>>
>> This is currently determined by the driver based on the number of 
>> SR-IOV VFs created. In order to keep this behavior by default the 
>> parameter is initialized to 0. To change the default behavior, user 
>> can set num_qps_per_vf parameter via devlink and this will be used as 
>> the preferred value to determine the queues and vectors assigned per 
>> VF.
> What if the host administrator wants to give the VF a different number 
> of vectors than queues? For example, if the admin knows the VF 
> instance will be exercising VF RDMA and the VF needs more vectors for 
> RDMA traffic.
>
> Should we have 2 separate values, i.e. "num_qps_per_vf" and 
> "num_msix_per_vf"?

I missed responding to this comment.
Sure. We can add num_msix_per_vf as a later patch to enable additional vectors for RDMA.

Tony,
Can this patch be included in your series when you submit to netdev?
Or do I need to rebase it on the latest net-next?

Thanks
Sridhar
>
>> USAGE:
>> On a 2 port NIC
>>   # devlink dev param show
>>   pci/0000:42:00.0:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 0
>>   pci/0000:42:00.1:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 0
>>
>>   /* Set num_qps_per_vf to 4 */
>>   # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value 
>> 4 cmode runtime
>>
>>   # devlink dev param show pci/0000:42:00.0 name num_qps_per_vf
>>   pci/0000:42:00.0:
>>     name num_qps_per_vf type driver-specific
>>       values:
>>         cmode runtime value 4
>>
>>   # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs
>>
>> This will create 8 VFs with 4 queue pairs and 5 vectors per VF 
>> compared to the default behavior of 16 queue pairs and 17 vectors per 
>> VF.
>>
>> v2:
>> Fixed kdoc for ice_devlink_num_qps_per_vf_validate()
>>
>> Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
>> ---
>>   Documentation/networking/devlink/ice.rst      |  23 ++++
>>   drivers/net/ethernet/intel/ice/ice_devlink.c  | 110 +++++++++++++++++-
>>   drivers/net/ethernet/intel/ice/ice_main.c     |   3 +
>>   .../net/ethernet/intel/ice/ice_virtchnl_pf.c  |   5 +-
>>   4 files changed, 139 insertions(+), 2 deletions(-)
>>
> <snip>
>

* [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
  2021-06-23 22:15     ` Singhai, Anjali
@ 2021-06-23 22:43       ` Samudrala, Sridhar
  0 siblings, 0 replies; 5+ messages in thread
From: Samudrala, Sridhar @ 2021-06-23 22:43 UTC (permalink / raw)
  To: intel-wired-lan

On 6/23/2021 3:15 PM, Singhai, Anjali wrote:
> Sridhar, how do you assign an asymmetric number of queues and vectors to a VF using devlink? Or does this end up as a configuration on VF representors... I hope not.
>
> So I am guessing you can either bulk-set the number of queues for all of the VFs, or set it per SR-IOV VF port?

This is the first step in a series of patches to make VF resources
configurable. The next step is to enable creating a devlink port for
each VF and to support port-specific parameters via the devlink port.
Then we can enable changing the queue pairs or MSI-X vectors for a
specific instance of a VF before it is activated, rather than all VFs
having an equal number of resources.
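
As a sketch of where this is headed (the port index, parameter name,
and use of per-port parameters are assumptions, not an implemented
interface):

 # devlink port param set pci/0000:42:00.0/1 name num_qps_per_vf value 8 cmode runtime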

>
> Thanks
> Anjali
>
> -----Original Message-----
> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf Of Samudrala, Sridhar
> Sent: Wednesday, June 23, 2021 9:21 AM
> To: Creeley, Brett <brett.creeley@intel.com>; intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L <anthony.l.nguyen@intel.com>
> Cc: Ismail, Mustafa <mustafa.ismail@intel.com>; Saleem, Shiraz <shiraz.saleem@intel.com>
> Subject: Re: [Intel-wired-lan] [PATCH net-next v2] ice: Enable configuration of number of qps per VF via devlink
>
> On 5/17/2021 2:49 PM, Creeley, Brett wrote:
>>> -----Original Message-----
>>> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf
>>> Of Sridhar Samudrala
>>> Sent: Monday, April 26, 2021 11:20 AM
>>> To: intel-wired-lan at lists.osuosl.org; Nguyen, Anthony L
>>> <anthony.l.nguyen@intel.com>; Samudrala, Sridhar
>>> <sridhar.samudrala@intel.com>
>>> Subject: [Intel-wired-lan] [PATCH net-next v2] ice: Enable
>>> configuration of number of qps per VF via devlink
>>>
>>> Introduce a devlink parameter 'num_qps_per_vf' to allow user to
>>> configure the maximum number of queue pairs given to SR-IOV VFs
>>> before they are created.
>>>
>>> This is currently determined by the driver based on the number of
>>> SR-IOV VFs created. In order to keep this behavior by default the
>>> parameter is initialized to 0. To change the default behavior, user
>>> can set num_qps_per_vf parameter via devlink and this will be used as
>>> the preferred value to determine the queues and vectors assigned per
>>> VF.
>> What if the host administrator wants to give the VF a different number
>> of vectors than queues? For example, if the admin knows the VF
>> instance will be exercising VF RDMA and the VF needs more vectors for
>> RDMA traffic.
>>
>> Should we have 2 separate values, i.e. "num_qps_per_vf" and
>> "num_msix_per_vf"?
> I missed responding to this comment.
> Sure. We can add num_msix_per_vf as a later patch to enable additional vectors for RDMA.
>
> Tony,
> Can this patch be included in your series when you submit to netdev?
> Or do I need to rebase it on the latest net-next?
>
> Thanks
> Sridhar
>>> USAGE:
>>> On a 2 port NIC
>>>    # devlink dev param show
>>>    pci/0000:42:00.0:
>>>      name num_qps_per_vf type driver-specific
>>>        values:
>>>          cmode runtime value 0
>>>    pci/0000:42:00.1:
>>>      name num_qps_per_vf type driver-specific
>>>        values:
>>>          cmode runtime value 0
>>>
>>>    /* Set num_qps_per_vf to 4 */
>>>    # devlink dev param set pci/0000:42:00.0 name num_qps_per_vf value
>>> 4 cmode runtime
>>>
>>>    # devlink dev param show pci/0000:42:00.0 name num_qps_per_vf
>>>    pci/0000:42:00.0:
>>>      name num_qps_per_vf type driver-specific
>>>        values:
>>>          cmode runtime value 4
>>>
>>>    # echo 8 > /sys/class/net/enp66s0f0/device/sriov_numvfs
>>>
>>> This will create 8 VFs with 4 queue pairs and 5 vectors per VF
>>> compared to the default behavior of 16 queue pairs and 17 vectors per
>>> VF.
>>>
>>> v2:
>>> Fixed kdoc for ice_devlink_num_qps_per_vf_validate()
>>>
>>> Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
>>> ---
>>>    Documentation/networking/devlink/ice.rst      |  23 ++++
>>>    drivers/net/ethernet/intel/ice/ice_devlink.c  | 110 +++++++++++++++++-
>>>    drivers/net/ethernet/intel/ice/ice_main.c     |   3 +
>>>    .../net/ethernet/intel/ice/ice_virtchnl_pf.c  |   5 +-
>>>    4 files changed, 139 insertions(+), 2 deletions(-)
>>>
>> <snip>
>>