netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] ibmvnic: store RX and TX subCRQ handle array in ibmvnic_adapter struct
@ 2020-07-01 21:25 Cristobal Forno
  2020-07-01 21:50 ` Thomas Falcon
  0 siblings, 1 reply; 3+ messages in thread
From: Cristobal Forno @ 2020-07-01 21:25 UTC (permalink / raw)
  To: netdev; +Cc: tlfalcon, Cristobal Forno

Currently the driver reads RX and TX subCRQ handle array directly from
a DMA-mapped buffer address when it needs to make a H_SEND_SUBCRQ
hcall. This patch stores that information in the ibmvnic_sub_crq_queue
structure instead of reading from the buffer received at login.

Signed-off-by: Cristobal Forno <cforno12@linux.ibm.com>
---
 drivers/net/ethernet/ibm/ibmvnic.c | 27 ++++++++++++++++++++-------
 drivers/net/ethernet/ibm/ibmvnic.h |  1 +
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fd7eae25fe9..ca0d88aab6da 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -305,6 +305,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 			      struct ibmvnic_rx_pool *pool)
 {
+	u64 *handle_array = adapter->rx_scrq[pool->index]->handle_array;
 	int count = pool->size - atomic_read(&pool->available);
 	struct device *dev = &adapter->vdev->dev;
 	int buffers_added = 0;
@@ -314,7 +315,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	unsigned int offset;
 	dma_addr_t dma_addr;
 	unsigned char *dst;
-	u64 *handle_array;
 	int shift = 0;
 	int index;
 	int i;
@@ -322,10 +322,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	if (!pool->active)
 		return;
 
-	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
-				      be32_to_cpu(adapter->login_rsp_buf->
-				      off_rxadd_subcrqs));
-
 	for (i = 0; i < count; ++i) {
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 		if (!skb) {
@@ -1553,8 +1549,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
-	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
-		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+	handle_array = tx_scrq->handle_array;
 
 	index = tx_pool->free_map[tx_pool->consumer_index];
 
@@ -4292,6 +4287,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	struct net_device *netdev = adapter->netdev;
 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
 	struct ibmvnic_login_buffer *login = adapter->login_buf;
+	int num_tx_pools;
+	int num_rx_pools;
 	int i;
 
 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -4326,6 +4323,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 		ibmvnic_remove(adapter->vdev);
 		return -EIO;
 	}
+
+	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+	for (i = 0; i < num_tx_pools; i++)
+		adapter->tx_scrq[i]->handle_array =
+			(u64 *)((u8 *)(adapter->login_rsp_buf) +
+				be32_to_cpu(adapter->login_rsp_buf->
+					    off_txsubm_subcrqs));
+
+	for (i = 0; i < num_rx_pools; i++)
+		adapter->rx_scrq[i]->handle_array =
+			(u64 *)((u8 *)(adapter->login_rsp_buf) +
+				be32_to_cpu(adapter->login_rsp_buf->
+					    off_rxadd_subcrqs));
+
 	release_login_buffer(adapter);
 	complete(&adapter->init_done);
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f8416e1d4cf0..e51c72d1e357 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
 	struct ibmvnic_adapter *adapter;
 	atomic_t used;
 	char name[32];
+	u64 *handle_array;
 };
 
 struct ibmvnic_long_term_buff {
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] ibmvnic: store RX and TX subCRQ handle array in ibmvnic_adapter struct
  2020-07-01 21:25 [PATCH] ibmvnic: store RX and TX subCRQ handle array in ibmvnic_adapter struct Cristobal Forno
@ 2020-07-01 21:50 ` Thomas Falcon
  2020-07-09 19:39   ` Cris Forno
  0 siblings, 1 reply; 3+ messages in thread
From: Thomas Falcon @ 2020-07-01 21:50 UTC (permalink / raw)
  To: Cristobal Forno, netdev

On 7/1/20 4:25 PM, Cristobal Forno wrote:
> Currently the driver reads RX and TX subCRQ handle array directly from
> a DMA-mapped buffer address when it needs to make a H_SEND_SUBCRQ
> hcall. This patch stores that information in the ibmvnic_sub_crq_queue
> structure instead of reading from the buffer received at login.
>   

Hi, thank you for the submission. I think it would be better, however, 
if each subCRQ structure had a member denoting its respective handle 
rather than a pointer to the handle array. This would allow us to 
discard the login_rsp buffer later when it is no longer needed.

Tom

> Signed-off-by: Cristobal Forno <cforno12@linux.ibm.com>
> ---
>   drivers/net/ethernet/ibm/ibmvnic.c | 27 ++++++++++++++++++++-------
>   drivers/net/ethernet/ibm/ibmvnic.h |  1 +
>   2 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index 0fd7eae25fe9..ca0d88aab6da 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -305,6 +305,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
>   static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   			      struct ibmvnic_rx_pool *pool)
>   {
> +	u64 *handle_array = adapter->rx_scrq[pool->index]->handle_array;
>   	int count = pool->size - atomic_read(&pool->available);
>   	struct device *dev = &adapter->vdev->dev;
>   	int buffers_added = 0;
> @@ -314,7 +315,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   	unsigned int offset;
>   	dma_addr_t dma_addr;
>   	unsigned char *dst;
> -	u64 *handle_array;
>   	int shift = 0;
>   	int index;
>   	int i;
> @@ -322,10 +322,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   	if (!pool->active)
>   		return;
>   
> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
> -				      be32_to_cpu(adapter->login_rsp_buf->
> -				      off_rxadd_subcrqs));
> -
>   	for (i = 0; i < count; ++i) {
>   		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
>   		if (!skb) {
> @@ -1553,8 +1549,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
>   
>   	tx_scrq = adapter->tx_scrq[queue_num];
>   	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
> -		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
> +	handle_array = tx_scrq->handle_array;
>   
>   	index = tx_pool->free_map[tx_pool->consumer_index];
>   
> @@ -4292,6 +4287,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>   	struct net_device *netdev = adapter->netdev;
>   	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
>   	struct ibmvnic_login_buffer *login = adapter->login_buf;
> +	int num_tx_pools;
> +	int num_rx_pools;
>   	int i;
>   
>   	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
> @@ -4326,6 +4323,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>   		ibmvnic_remove(adapter->vdev);
>   		return -EIO;
>   	}
> +
> +	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
> +	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
> +
> +	for (i = 0; i < num_tx_pools; i++)
> +		adapter->tx_scrq[i]->handle_array =
> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
> +				be32_to_cpu(adapter->login_rsp_buf->
> +					    off_txsubm_subcrqs));
> +
> +	for (i = 0; i < num_rx_pools; i++)
> +		adapter->rx_scrq[i]->handle_array =
> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
> +				be32_to_cpu(adapter->login_rsp_buf->
> +					    off_rxadd_subcrqs));
> +
>   	release_login_buffer(adapter);
>   	complete(&adapter->init_done);
>   
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
> index f8416e1d4cf0..e51c72d1e357 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.h
> +++ b/drivers/net/ethernet/ibm/ibmvnic.h
> @@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
>   	struct ibmvnic_adapter *adapter;
>   	atomic_t used;
>   	char name[32];
> +	u64 *handle_array;
>   };
>   
>   struct ibmvnic_long_term_buff {

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] ibmvnic: store RX and TX subCRQ handle array in ibmvnic_adapter struct
  2020-07-01 21:50 ` Thomas Falcon
@ 2020-07-09 19:39   ` Cris Forno
  0 siblings, 0 replies; 3+ messages in thread
From: Cris Forno @ 2020-07-09 19:39 UTC (permalink / raw)
  To: Thomas Falcon, netdev

Thomas Falcon <tlfalcon@linux.ibm.com> writes:

> On 7/1/20 4:25 PM, Cristobal Forno wrote:
>> Currently the driver reads RX and TX subCRQ handle array directly from
>> a DMA-mapped buffer address when it needs to make a H_SEND_SUBCRQ
>> hcall. This patch stores that information in the ibmvnic_sub_crq_queue
>> structure instead of reading from the buffer received at login.
>>   
>
> Hi, thank you for the submission. I think it would be better, however, 
> if each subCRQ structure had a member denoting its respective handle 
> rather than a pointer to the handle array. This would allow us to 
> discard the login_rsp buffer later when it is no longer needed.
>
> Tom

Hi, thanks for your suggestion. I have sent another patch (v2) with your
suggestions applied.

-Cristobal Forno
>
>> Signed-off-by: Cristobal Forno <cforno12@linux.ibm.com>
>> ---
>>   drivers/net/ethernet/ibm/ibmvnic.c | 27 ++++++++++++++++++++-------
>>   drivers/net/ethernet/ibm/ibmvnic.h |  1 +
>>   2 files changed, 21 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
>> index 0fd7eae25fe9..ca0d88aab6da 100644
>> --- a/drivers/net/ethernet/ibm/ibmvnic.c
>> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
>> @@ -305,6 +305,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
>>   static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>>   			      struct ibmvnic_rx_pool *pool)
>>   {
>> +	u64 *handle_array = adapter->rx_scrq[pool->index]->handle_array;
>>   	int count = pool->size - atomic_read(&pool->available);
>>   	struct device *dev = &adapter->vdev->dev;
>>   	int buffers_added = 0;
>> @@ -314,7 +315,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>>   	unsigned int offset;
>>   	dma_addr_t dma_addr;
>>   	unsigned char *dst;
>> -	u64 *handle_array;
>>   	int shift = 0;
>>   	int index;
>>   	int i;
>> @@ -322,10 +322,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>>   	if (!pool->active)
>>   		return;
>>   
>> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
>> -				      be32_to_cpu(adapter->login_rsp_buf->
>> -				      off_rxadd_subcrqs));
>> -
>>   	for (i = 0; i < count; ++i) {
>>   		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
>>   		if (!skb) {
>> @@ -1553,8 +1549,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
>>   
>>   	tx_scrq = adapter->tx_scrq[queue_num];
>>   	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
>> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
>> -		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
>> +	handle_array = tx_scrq->handle_array;
>>   
>>   	index = tx_pool->free_map[tx_pool->consumer_index];
>>   
>> @@ -4292,6 +4287,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>>   	struct net_device *netdev = adapter->netdev;
>>   	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
>>   	struct ibmvnic_login_buffer *login = adapter->login_buf;
>> +	int num_tx_pools;
>> +	int num_rx_pools;
>>   	int i;
>>   
>>   	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
>> @@ -4326,6 +4323,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>>   		ibmvnic_remove(adapter->vdev);
>>   		return -EIO;
>>   	}
>> +
>> +	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
>> +	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
>> +
>> +	for (i = 0; i < num_tx_pools; i++)
>> +		adapter->tx_scrq[i]->handle_array =
>> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
>> +				be32_to_cpu(adapter->login_rsp_buf->
>> +					    off_txsubm_subcrqs));
>> +
>> +	for (i = 0; i < num_rx_pools; i++)
>> +		adapter->rx_scrq[i]->handle_array =
>> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
>> +				be32_to_cpu(adapter->login_rsp_buf->
>> +					    off_rxadd_subcrqs));
>> +
>>   	release_login_buffer(adapter);
>>   	complete(&adapter->init_done);
>>   
>> diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
>> index f8416e1d4cf0..e51c72d1e357 100644
>> --- a/drivers/net/ethernet/ibm/ibmvnic.h
>> +++ b/drivers/net/ethernet/ibm/ibmvnic.h
>> @@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
>>   	struct ibmvnic_adapter *adapter;
>>   	atomic_t used;
>>   	char name[32];
>> +	u64 *handle_array;
>>   };
>>   
>>   struct ibmvnic_long_term_buff {

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-07-09 19:40 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-01 21:25 [PATCH] ibmvnic: store RX and TX subCRQ handle array in ibmvnic_adapter struct Cristobal Forno
2020-07-01 21:50 ` Thomas Falcon
2020-07-09 19:39   ` Cris Forno

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).