* [PATCH net-next] vmxnet3: Remove buf_info from device accessible structures
@ 2021-01-20  2:19 Ronak Doshi
  2021-01-22  1:07 ` Jakub Kicinski
  0 siblings, 1 reply; 6+ messages in thread
From: Ronak Doshi @ 2021-01-20  2:19 UTC (permalink / raw)
  To: netdev
  Cc: Ronak Doshi, Petr Vandrovec,
	maintainer:VMWARE VMXNET3 ETHERNET DRIVER, David S. Miller,
	Jakub Kicinski, open list

From: Petr Vandrovec <petr@vmware.com>

vmxnet3: Remove buf_info from device accessible structures

The buf_info structures in the RX & TX queues are private driver data that
do not need to be visible to the device.  Although the queue descriptor
carries a physical address and length that point to these structures,
their layout is not standardized and the device never looks at them.

So let's allocate these structures in non-DMA-able memory, and fill in
the physical address as all-ones and the length as zero in the queue
descriptor.

This should alleviate the concern raised by Martin Radev in
https://lists.osuosl.org/pipermail/intel-wired-lan/Week-of-Mon-20210104/022829.html
that a malicious vmxnet3 device could subvert SVM/TDX guarantees.

Signed-off-by: Petr Vandrovec <petr@vmware.com>
Signed-off-by: Ronak Doshi <doshir@vmware.com>
---
 drivers/net/vmxnet3/vmxnet3_drv.c | 45 ++++++++++++++++-----------------------
 drivers/net/vmxnet3/vmxnet3_int.h |  2 --
 2 files changed, 18 insertions(+), 29 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 336504b7531d..8f5e1e363598 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -452,9 +452,7 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		tq->comp_ring.base = NULL;
 	}
 	if (tq->buf_info) {
-		dma_free_coherent(&adapter->pdev->dev,
-				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
-				  tq->buf_info, tq->buf_info_pa);
+		kfree(tq->buf_info);
 		tq->buf_info = NULL;
 	}
 }
@@ -505,8 +503,6 @@ static int
 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 		  struct vmxnet3_adapter *adapter)
 {
-	size_t sz;
-
 	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
 	       tq->comp_ring.base || tq->buf_info);
 
@@ -534,11 +530,13 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 		goto err;
 	}
 
-	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
-	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
-					  &tq->buf_info_pa, GFP_KERNEL);
-	if (!tq->buf_info)
+	tq->buf_info = kmalloc_array_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
+					  GFP_KERNEL | __GFP_ZERO,
+					  dev_to_node(&adapter->pdev->dev));
+	if (!tq->buf_info) {
+		netdev_err(adapter->netdev, "failed to allocate tx buffer info\n");
 		goto err;
+	}
 
 	return 0;
 
@@ -1738,10 +1736,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 	}
 
 	if (rq->buf_info[0]) {
-		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
-			(rq->rx_ring[0].size + rq->rx_ring[1].size);
-		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
-				  rq->buf_info_pa);
+		kfree(rq->buf_info[0]);
 		rq->buf_info[0] = rq->buf_info[1] = NULL;
 	}
 }
@@ -1883,12 +1878,13 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 		goto err;
 	}
 
-	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
-						   rq->rx_ring[1].size);
-	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
-				GFP_KERNEL);
-	if (!bi)
+	bi = kmalloc_array_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
+				sizeof(rq->buf_info[0][0]), GFP_KERNEL | __GFP_ZERO,
+				dev_to_node(&adapter->pdev->dev));
+	if (!bi) {
+		netdev_err(adapter->netdev, "failed to allocate rx buffer info\n");
 		goto err;
+	}
 
 	rq->buf_info[0] = bi;
 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
@@ -2522,14 +2518,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
-		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
+		tqc->ddPA           = cpu_to_le64(~0ULL);
 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
 		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
-		tqc->ddLen          = cpu_to_le32(
-					sizeof(struct vmxnet3_tx_buf_info) *
-					tqc->txRingSize);
+		tqc->ddLen          = cpu_to_le32(0);
 		tqc->intrIdx        = tq->comp_ring.intr_idx;
 	}
 
@@ -2541,14 +2535,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
-		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
+		rqc->ddPA            = cpu_to_le64(~0ULL);
 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
-		rqc->ddLen           = cpu_to_le32(
-					sizeof(struct vmxnet3_rx_buf_info) *
-					(rqc->rxRingSize[0] +
-					 rqc->rxRingSize[1]));
+		rqc->ddLen           = cpu_to_le32(0);
 		rqc->intrIdx         = rq->comp_ring.intr_idx;
 		if (VMXNET3_VERSION_GE_3(adapter)) {
 			rqc->rxDataRingBasePA =
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index d958b92c9429..e910596b79cf 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -240,7 +240,6 @@ struct vmxnet3_tx_queue {
 	spinlock_t                      tx_lock;
 	struct vmxnet3_cmd_ring         tx_ring;
 	struct vmxnet3_tx_buf_info      *buf_info;
-	dma_addr_t                       buf_info_pa;
 	struct vmxnet3_tx_data_ring     data_ring;
 	struct vmxnet3_comp_ring        comp_ring;
 	struct Vmxnet3_TxQueueCtrl      *shared;
@@ -298,7 +297,6 @@ struct vmxnet3_rx_queue {
 	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
 	u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
 	struct vmxnet3_rx_buf_info     *buf_info[2];
-	dma_addr_t                      buf_info_pa;
 	struct Vmxnet3_RxQueueCtrl            *shared;
 	struct vmxnet3_rq_driver_stats  stats;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
-- 
2.11.0



* Re: [PATCH net-next] vmxnet3: Remove buf_info from device accessible structures
  2021-01-20  2:19 [PATCH net-next] vmxnet3: Remove buf_info from device accessible structures Ronak Doshi
@ 2021-01-22  1:07 ` Jakub Kicinski
  2021-01-22  8:24   ` Ronak Doshi
  0 siblings, 1 reply; 6+ messages in thread
From: Jakub Kicinski @ 2021-01-22  1:07 UTC (permalink / raw)
  To: Ronak Doshi
  Cc: netdev, Petr Vandrovec,
	maintainer:VMWARE VMXNET3 ETHERNET DRIVER, David S. Miller,
	open list

On Tue, 19 Jan 2021 18:19:40 -0800 Ronak Doshi wrote:
> From: Petr Vandrovec <petr@vmware.com>
> 
> vmxnet3: Remove buf_info from device accessible structures

Something happened to the posting, looks like the subject is listed
twice?

> The buf_info structures in the RX & TX queues are private driver data that
> do not need to be visible to the device.  Although the queue descriptor
> carries a physical address and length that point to these structures,
> their layout is not standardized and the device never looks at them.
> 
> So let's allocate these structures in non-DMA-able memory, and fill in
> the physical address as all-ones and the length as zero in the queue
> descriptor.
> 
> This should alleviate the concern raised by Martin Radev in
> https://lists.osuosl.org/pipermail/intel-wired-lan/Week-of-Mon-20210104/022829.html
> that a malicious vmxnet3 device could subvert SVM/TDX guarantees.
> 
> Signed-off-by: Petr Vandrovec <petr@vmware.com>
> Signed-off-by: Ronak Doshi <doshir@vmware.com>

> @@ -534,11 +530,13 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
>  		goto err;
>  	}
>  
> -	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
> -	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
> -					  &tq->buf_info_pa, GFP_KERNEL);
> -	if (!tq->buf_info)
> +	tq->buf_info = kmalloc_array_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
> +					  GFP_KERNEL | __GFP_ZERO,
> +					  dev_to_node(&adapter->pdev->dev));

kcalloc_node()

> +	if (!tq->buf_info) {
> +		netdev_err(adapter->netdev, "failed to allocate tx buffer info\n");

Please drop the message, OOM splat will be visible enough. checkpatch
usually points this out

>  		goto err;
> +	}

Same comments for vmxnet3_rq_create()
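
For reference, a minimal sketch of what the reworked TX allocation might
look like with kcalloc_node() and the message dropped (reusing the names
from the diff above; illustration only, not a tested respin):

	/* Sketch: zeroed, node-local allocation of the driver-private
	 * TX bookkeeping array, with no extra OOM message on failure.
	 */
	tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
				    GFP_KERNEL,
				    dev_to_node(&adapter->pdev->dev));
	if (!tq->buf_info)
		goto err;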


* Re: [PATCH net-next] vmxnet3: Remove buf_info from device accessible structures
  2021-01-22  1:07 ` Jakub Kicinski
@ 2021-01-22  8:24   ` Ronak Doshi
  2021-01-22 17:50     ` Jakub Kicinski
  0 siblings, 1 reply; 6+ messages in thread
From: Ronak Doshi @ 2021-01-22  8:24 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: netdev, Petr Vandrovec, Pv-drivers, David S. Miller, open list


On 1/21/21, 5:07 PM, "Jakub Kicinski" <kuba@kernel.org> wrote:
>  On Tue, 19 Jan 2021 18:19:40 -0800 Ronak Doshi wrote:
>  > From: Petr Vandrovec <petr@vmware.com>
>  > 
>  > vmxnet3: Remove buf_info from device accessible structures
>
>    Something happened to the posting, looks like the subject is listed
>   twice?
It got sent twice. Please ignore.

>  > -	if (!tq->buf_info)
>  > +	tq->buf_info = kmalloc_array_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
>  > +					  GFP_KERNEL | __GFP_ZERO,
>  > +					  dev_to_node(&adapter->pdev->dev));
>
>   kcalloc_node()
Sure, will use this helper.

>  > +	if (!tq->buf_info) {
>  > +		netdev_err(adapter->netdev, "failed to allocate tx buffer info\n")
>
> Please drop the message, OOM splat will be visible enough. checkpatch
> usually points this out

Okay, will drop it. Checkpatch did not complain about the error message though.

Thanks,
Ronak



* Re: [PATCH net-next] vmxnet3: Remove buf_info from device accessible structures
  2021-01-22  8:24   ` Ronak Doshi
@ 2021-01-22 17:50     ` Jakub Kicinski
  2021-01-22 18:37       ` [PATCH] checkpatch: Add kmalloc_array_node to unnecessary OOM message check Joe Perches
  0 siblings, 1 reply; 6+ messages in thread
From: Jakub Kicinski @ 2021-01-22 17:50 UTC (permalink / raw)
  To: Ronak Doshi
  Cc: netdev, Petr Vandrovec, Pv-drivers, David S. Miller, open list,
	Joe Perches

On Fri, 22 Jan 2021 08:24:59 +0000 Ronak Doshi wrote:
> On 1/21/21, 5:07 PM, "Jakub Kicinski" <kuba@kernel.org> wrote:
> >  On Tue, 19 Jan 2021 18:19:40 -0800 Ronak Doshi wrote:  
> > > +	tq->buf_info = kmalloc_array_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
> > > +					  GFP_KERNEL | __GFP_ZERO,
> > > +					  dev_to_node(&adapter->pdev->dev));  
> > > +	if (!tq->buf_info) {
> > > +		netdev_err(adapter->netdev, "failed to allocate tx buffer info\n")  
> >
> > Please drop the message, OOM splat will be visible enough. checkpatch
> > usually points this out  
> 
> Okay, will drop it. Checkpatch did not complain about the error message though.

Looks like it matches alloc_node or alloc_array, but not
alloc_array_node?

our $allocFunctions = qr{(?x:
	(?:(?:devm_)?
		(?:kv|k|v)[czm]alloc(?:_node|_array)? |


CCing Joe in case he thinks it's worth fixing
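
A quick standalone way to see the difference (simplified and anchored, so
this only illustrates the alternation, not the full $allocFunctions
context in checkpatch):

	#!/usr/bin/perl
	# The old alternation accepts at most one of _node/_array, so the
	# combined kmalloc_array_node suffix falls through; making both
	# suffixes independently optional fixes that.
	my $old = qr{(?:kv|k|v)[czm]alloc(?:_node|_array)?};
	my $new = qr{(?:kv|k|v)[czm]alloc(?:_array)?(?:_node)?};
	for my $fn (qw(kmalloc kcalloc_node kmalloc_array kmalloc_array_node)) {
		printf "%-20s old:%-9s new:%s\n", $fn,
		       ($fn =~ /^$old$/ ? "match" : "no match"),
		       ($fn =~ /^$new$/ ? "match" : "no match");
	}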


* [PATCH] checkpatch: Add kmalloc_array_node to unnecessary OOM message check
  2021-01-22 17:50     ` Jakub Kicinski
@ 2021-01-22 18:37       ` Joe Perches
  0 siblings, 0 replies; 6+ messages in thread
From: Joe Perches @ 2021-01-22 18:37 UTC (permalink / raw)
  To: Jakub Kicinski, Ronak Doshi, Andrew Morton
  Cc: netdev, Petr Vandrovec, Pv-drivers, David S. Miller, open list

commit 5799b255c491 ("include/linux/slab.h: add kmalloc_array_node() and
kcalloc_node()") was added in 2017.  Update the unnecessary OOM message
test to include it.

Signed-off-by: Joe Perches <joe@perches.com>
Reported-by: Jakub Kicinski <kuba@kernel.org>
---

Maybe not worth fixing, but no real effort to fix either.

 scripts/checkpatch.pl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4f8494527139..8dbf1986a8de 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -487,7 +487,7 @@ our $logFunctions = qr{(?x:
 
 our $allocFunctions = qr{(?x:
 	(?:(?:devm_)?
-		(?:kv|k|v)[czm]alloc(?:_node|_array)? |
+		(?:kv|k|v)[czm]alloc(?:_array)?(?:_node)? |
 		kstrdup(?:_const)? |
 		kmemdup(?:_nul)?) |
 	(?:\w+)?alloc_skb(?:_ip_align)? |



