* [PATCH 5.10 0/1] fix null-ptr-deref in rds_ib_add_one()
@ 2021-11-12  8:41 Yang Yingliang
From: Yang Yingliang @ 2021-11-12  8:41 UTC
  To: stable
  Cc: linux-kernel, rds-devel, linux-rdma, netdev, gregkh, sashal,
	kuba, davem, santosh.shilimkar

I got this null-ptr-deref report while doing fuzz testing:

[  158.820284][T12735] BUG: KASAN: null-ptr-deref in dma_pool_create+0xf7/0x440
[  158.821192][T12735] Read of size 4 at addr 0000000000000298 by task syz-executor.7/12735
[  158.822239][T12735]
[  158.822539][T12735] CPU: 0 PID: 12735 Comm: syz-executor.7 Not tainted 5.10.78 #691
[  158.823494][T12735] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
[  158.824720][T12735] Call Trace:
[  158.825889][T12735]  dump_stack+0x111/0x151
[  158.826458][T12735]  ? dma_pool_create+0xf7/0x440
[  158.827067][T12735]  ? dma_pool_create+0xf7/0x440
[  158.827672][T12735]  kasan_report.cold.11+0x5/0x37
[  158.828289][T12735]  ? dma_pool_create+0xf7/0x440
[  158.828894][T12735]  dma_pool_create+0xf7/0x440
[  158.829480][T12735]  rds_ib_add_one+0x448/0x570
[  158.830062][T12735]  ? rds_ib_remove_one+0x150/0x150
[  158.830702][T12735]  add_client_context+0x2ef/0x440
[  158.831329][T12735]  ? ib_unregister_driver+0x1a0/0x1a0
[  158.832005][T12735]  ? strchr+0x28/0x50
[  158.832527][T12735]  enable_device_and_get+0x199/0x320
[  158.833194][T12735]  ? add_one_compat_dev.part.20+0x3d0/0x3d0
[  158.833924][T12735]  ? rxe_ib_alloc_hw_stats+0x84/0x90
[  158.834590][T12735]  ? setup_hw_stats+0x40/0x520
[  158.835184][T12735]  ? uevent_show+0x1e0/0x1e0
[  158.835758][T12735]  ? rxe_ib_get_hw_stats+0xa0/0xa0
[  158.836398][T12735]  ib_register_device+0x8ef/0x9d0
[  158.837026][T12735]  ? netlink_unicast+0x3e1/0x510
[  158.837644][T12735]  ? alloc_port_data.part.17+0x1f0/0x1f0
[  158.838358][T12735]  ? __alloc_pages_nodemask+0x229/0x450
[  158.839051][T12735]  ? kasan_unpoison_shadow+0x30/0x40
[  158.839710][T12735]  ? __kasan_kmalloc.constprop.12+0xbe/0xd0
[  158.840443][T12735]  ? kmem_cache_alloc_node_trace+0xa3/0x870
[  158.841178][T12735]  ? __crypto_alg_lookup+0x26d/0x2d0
[  158.841841][T12735]  ? __kasan_kmalloc.constprop.12+0xbe/0xd0
[  158.842573][T12735]  ? crypto_shash_init_tfm+0x10d/0x160
[  158.843255][T12735]  ? crc32_pclmul_cra_init+0x12/0x20
[  158.843916][T12735]  ? crypto_create_tfm_node+0xb7/0x1a0
[  158.844594][T12735]  ? crypto_alloc_tfm_node+0x12e/0x260
[  158.845273][T12735]  rxe_register_device+0x21f/0x250
[  158.845904][T12735]  rxe_add+0x9f9/0xa80
[  158.846412][T12735]  rxe_net_add+0x56/0xa0
[  158.846917][T12735]  rxe_newlink+0x8c/0xb0
[  158.847427][T12735]  nldev_newlink+0x23d/0x360
[  158.847973][T12735]  ? nldev_set_doit+0x2b0/0x2b0
[  158.848562][T12735]  ? apparmor_capable+0x2e9/0x4e0
[  158.849159][T12735]  ? apparmor_cred_prepare+0x3f0/0x3f0
[  158.849811][T12735]  ? apparmor_cred_prepare+0x3f0/0x3f0
[  158.850469][T12735]  ? ____sys_sendmsg+0x4db/0x500
[  158.851064][T12735]  ? ___sys_sendmsg+0xf8/0x160
[  158.851648][T12735]  ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  158.852400][T12735]  ? cap_capable+0x125/0x140
[  158.854398][T12735]  ? ns_capable_common+0x88/0xa0
[  158.855012][T12735]  ? nldev_set_doit+0x2b0/0x2b0
[  158.855611][T12735]  rdma_nl_rcv+0x41f/0x630
[  158.856162][T12735]  ? rdma_nl_multicast+0xa0/0xa0
[  158.856772][T12735]  ? netlink_lookup+0x273/0x3a0
[  158.857374][T12735]  ? netlink_broadcast+0x40/0x40
[  158.857984][T12735]  ? __kasan_kmalloc.constprop.12+0xbe/0xd0
[  158.858724][T12735]  ? __rcu_read_unlock+0x34/0x260
[  158.859349][T12735]  ? netlink_deliver_tap+0x65/0x450
[  158.859991][T12735]  netlink_unicast+0x3e1/0x510
[  158.860586][T12735]  ? netlink_attachskb+0x540/0x540
[  158.861218][T12735]  ? _copy_from_iter_full+0x1b9/0x5e0
[  158.861905][T12735]  ? __check_object_size+0x27c/0x300
[  158.862561][T12735]  netlink_sendmsg+0x4aa/0x870
[  158.863131][T12735]  ? netlink_unicast+0x510/0x510
[  158.863728][T12735]  ? netlink_unicast+0x510/0x510
[  158.864326][T12735]  sock_sendmsg+0x83/0xa0
[  158.864849][T12735]  ____sys_sendmsg+0x4db/0x500
[  158.865429][T12735]  ? __copy_msghdr_from_user+0x310/0x310
[  158.866112][T12735]  ? kernel_sendmsg+0x50/0x50
[  158.866700][T12735]  ? do_syscall_64+0x2d/0x70
[  158.867256][T12735]  ? entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  158.867996][T12735]  ___sys_sendmsg+0xf8/0x160
[  158.868552][T12735]  ? sendmsg_copy_msghdr+0x70/0x70
[  158.869175][T12735]  ? kasan_unpoison_shadow+0x30/0x40
[  158.869824][T12735]  ? futex_exit_release+0x80/0x80
[  158.870442][T12735]  ? apparmor_task_setrlimit+0x500/0x500
[  158.871122][T12735]  ? kmem_cache_alloc+0x143/0x810
[  158.871733][T12735]  ? __rcu_read_unlock+0x34/0x260
[  158.872351][T12735]  ? __fget_files+0x14a/0x1b0
[  158.872920][T12735]  ? __fget_light+0xeb/0x140
[  158.873474][T12735]  __sys_sendmsg+0xfe/0x1c0
[  158.874017][T12735]  ? __sys_sendmsg_sock+0x80/0x80
[  158.874625][T12735]  ? _copy_to_user+0x97/0xb0
[  158.875177][T12735]  ? put_timespec64+0xab/0xe0
[  158.875740][T12735]  ? nsecs_to_jiffies+0x30/0x30
[  158.876325][T12735]  ? fpregs_assert_state_consistent+0x8f/0xa0
[  158.877054][T12735]  do_syscall_64+0x2d/0x70
[  158.877585][T12735]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  158.878297][T12735] RIP: 0033:0x45ecc9
[  158.878769][T12735] Code: 1d b1 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 eb b0 fb ff c3 66 2e 0f 1f 84 00 00 00 00
[  158.881085][T12735] RSP: 002b:00007f2c7a09fc68 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
[  158.882086][T12735] RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 000000000045ecc9
[  158.883038][T12735] RDX: 0000000000000000 RSI: 00000000200001c0 RDI: 000000000000000a
[  158.883983][T12735] RBP: 000000000119bfe0 R08: 0000000000000000 R09: 0000000000000000
[  158.884929][T12735] R10: 0000000000000000 R11: 0000000000000246 R12: 000000000119bfac
[  158.885877][T12735] R13: 00007fff86afff9f R14: 00007f2c7a0a09c0 R15: 000000000119bfac


In rxe_register_device(), a NULL pointer is passed to ib_register_device(),
so 'device->dma_device' is left NULL.  When rds_ib_add_one() later hands that
NULL dma_device to dma_pool_create(), it leads to a null-ptr-deref.
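
For reference, this is the pre-patch allocation in rds_ib_add_one() that
trips over the NULL pointer (the same code the patch below removes); the
comments are my annotations, and the exact field behind the 0x298 read is an
assumption based on the report:

	rds_ibdev->rid_hdrs_pool = dma_pool_create(device->name,
						   device->dma_device, /* NULL for rxe */
						   sizeof(struct rds_header),
						   L1_CACHE_BYTES, 0);

rxe is a software RDMA device with no DMA-capable parent, so
rxe_register_device() registers it with a NULL dma_device.
dma_pool_create() dereferences its dev argument unconditionally (presumably
the dev_to_node()/numa_node lookup, which would match "Read of size 4 at
addr 0000000000000298").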

Christoph Hellwig (1):
  rds: stop using dmapool

 net/rds/ib.c      |  10 ----
 net/rds/ib.h      |   6 ---
 net/rds/ib_cm.c   | 128 ++++++++++++++++++++++++++++------------------
 net/rds/ib_recv.c |  18 +++++--
 net/rds/ib_send.c |   8 +++
 5 files changed, 101 insertions(+), 69 deletions(-)

-- 
2.25.1



* [PATCH 5.10 1/1] rds: stop using dmapool
From: Yang Yingliang @ 2021-11-12  8:41 UTC
  To: stable
  Cc: linux-kernel, rds-devel, linux-rdma, netdev, gregkh, sashal,
	kuba, davem, santosh.shilimkar

From: Christoph Hellwig <hch@lst.de>

commit 42f2611cc1738b201701e717246e11e86bef4e1e upstream.

RDMA ULPs should only perform DMA through the ib_dma_* API instead of
using the hidden dma_device directly.  In addition, using the dma coherent
API family that dmapool is a part of can be very inefficient on platforms
that are not DMA coherent.  Switch to use slab allocations and the
ib_dma_* APIs instead.
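
In practice the conversion swaps a coherent dmapool allocation for a slab
allocation plus a streaming mapping through the ib_dma_* wrappers.  A
condensed sketch of the pattern (simplified from the rds_dma_hdr_alloc()
helper added below; hdr, dev and dma_addr are illustrative locals):

	/* before: coherent memory from a per-device pool, which needs a
	 * valid dma_device behind the scenes */
	hdr = dma_pool_zalloc(pool, GFP_KERNEL, &dma_addr);

	/* after: plain slab memory, mapped through the ib_dma_* API, which
	 * also copes with software devices such as rxe */
	hdr = kzalloc_node(sizeof(*hdr), GFP_KERNEL, ibdev_to_node(dev));
	if (hdr) {
		dma_addr = ib_dma_map_single(dev, hdr, sizeof(*hdr),
					     DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			kfree(hdr);
			hdr = NULL;
		}
	}

Since the headers are now streaming mappings rather than coherent memory,
the CPU accesses in ib_recv.c and ib_send.c are bracketed with
ib_dma_sync_single_for_cpu()/ib_dma_sync_single_for_device() calls, as the
hunks below show.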

Link: https://lore.kernel.org/r/20201106181941.1878556-6-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 net/rds/ib.c      |  10 ----
 net/rds/ib.h      |   6 ---
 net/rds/ib_cm.c   | 128 ++++++++++++++++++++++++++++------------------
 net/rds/ib_recv.c |  18 +++++--
 net/rds/ib_send.c |   8 +++
 5 files changed, 101 insertions(+), 69 deletions(-)

diff --git a/net/rds/ib.c b/net/rds/ib.c
index deecbdcdae84e..24c9a9005a6fb 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -30,7 +30,6 @@
  * SOFTWARE.
  *
  */
-#include <linux/dmapool.h>
 #include <linux/kernel.h>
 #include <linux/in.h>
 #include <linux/if.h>
@@ -108,7 +107,6 @@ static void rds_ib_dev_free(struct work_struct *work)
 		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
 	if (rds_ibdev->pd)
 		ib_dealloc_pd(rds_ibdev->pd);
-	dma_pool_destroy(rds_ibdev->rid_hdrs_pool);
 
 	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
 		list_del(&i_ipaddr->list);
@@ -191,14 +189,6 @@ static int rds_ib_add_one(struct ib_device *device)
 		rds_ibdev->pd = NULL;
 		goto put_dev;
 	}
-	rds_ibdev->rid_hdrs_pool = dma_pool_create(device->name,
-						   device->dma_device,
-						   sizeof(struct rds_header),
-						   L1_CACHE_BYTES, 0);
-	if (!rds_ibdev->rid_hdrs_pool) {
-		ret = -ENOMEM;
-		goto put_dev;
-	}
 
 	rds_ibdev->mr_1m_pool =
 		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
diff --git a/net/rds/ib.h b/net/rds/ib.h
index c23a11d9ad362..2ba71102b1f1f 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -246,7 +246,6 @@ struct rds_ib_device {
 	struct list_head	conn_list;
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
-	struct dma_pool		*rid_hdrs_pool; /* RDS headers DMA pool */
 	u8			odp_capable:1;
 
 	unsigned int		max_mrs;
@@ -380,11 +379,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
 void rds_ib_cm_connect_complete(struct rds_connection *conn,
 				struct rdma_cm_event *event);
-struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
-				       struct dma_pool *pool,
-				       dma_addr_t **dma_addrs, u32 num_hdrs);
-void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs,
-		       dma_addr_t *dma_addrs, u32 num_hdrs);
 
 #define rds_ib_conn_error(conn, fmt...) \
 	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index b36b60668b1da..f5cbe963cd8f7 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -30,7 +30,6 @@
  * SOFTWARE.
  *
  */
-#include <linux/dmapool.h>
 #include <linux/kernel.h>
 #include <linux/in.h>
 #include <linux/slab.h>
@@ -441,42 +440,87 @@ static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
 	rds_ibdev->vector_load[index]--;
 }
 
+static void rds_dma_hdr_free(struct ib_device *dev, struct rds_header *hdr,
+		dma_addr_t dma_addr, enum dma_data_direction dir)
+{
+	ib_dma_unmap_single(dev, dma_addr, sizeof(*hdr), dir);
+	kfree(hdr);
+}
+
+static struct rds_header *rds_dma_hdr_alloc(struct ib_device *dev,
+		dma_addr_t *dma_addr, enum dma_data_direction dir)
+{
+	struct rds_header *hdr;
+
+	hdr = kzalloc_node(sizeof(*hdr), GFP_KERNEL, ibdev_to_node(dev));
+	if (!hdr)
+		return NULL;
+
+	*dma_addr = ib_dma_map_single(dev, hdr, sizeof(*hdr),
+				      DMA_BIDIRECTIONAL);
+	if (ib_dma_mapping_error(dev, *dma_addr)) {
+		kfree(hdr);
+		return NULL;
+	}
+
+	return hdr;
+}
+
+/* Free the DMA memory used to store struct rds_header.
+ *
+ * @dev: the RDS IB device
+ * @hdrs: pointer to the array storing DMA memory pointers
+ * @dma_addrs: pointer to the array storing DMA addresses
+ * @num_hdars: number of headers to free.
+ */
+static void rds_dma_hdrs_free(struct rds_ib_device *dev,
+		struct rds_header **hdrs, dma_addr_t *dma_addrs, u32 num_hdrs,
+		enum dma_data_direction dir)
+{
+	u32 i;
+
+	for (i = 0; i < num_hdrs; i++)
+		rds_dma_hdr_free(dev->dev, hdrs[i], dma_addrs[i], dir);
+	kvfree(hdrs);
+	kvfree(dma_addrs);
+}
+
+
 /* Allocate DMA coherent memory to be used to store struct rds_header for
  * sending/receiving packets.  The pointers to the DMA memory and the
  * associated DMA addresses are stored in two arrays.
  *
- * @ibdev: the IB device
- * @pool: the DMA memory pool
+ * @dev: the RDS IB device
  * @dma_addrs: pointer to the array for storing DMA addresses
  * @num_hdrs: number of headers to allocate
  *
  * It returns the pointer to the array storing the DMA memory pointers.  On
  * error, NULL pointer is returned.
  */
-struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
-				       struct dma_pool *pool,
-				       dma_addr_t **dma_addrs, u32 num_hdrs)
+static struct rds_header **rds_dma_hdrs_alloc(struct rds_ib_device *dev,
+		dma_addr_t **dma_addrs, u32 num_hdrs,
+		enum dma_data_direction dir)
 {
 	struct rds_header **hdrs;
 	dma_addr_t *hdr_daddrs;
 	u32 i;
 
 	hdrs = kvmalloc_node(sizeof(*hdrs) * num_hdrs, GFP_KERNEL,
-			     ibdev_to_node(ibdev));
+			     ibdev_to_node(dev->dev));
 	if (!hdrs)
 		return NULL;
 
 	hdr_daddrs = kvmalloc_node(sizeof(*hdr_daddrs) * num_hdrs, GFP_KERNEL,
-				   ibdev_to_node(ibdev));
+				   ibdev_to_node(dev->dev));
 	if (!hdr_daddrs) {
 		kvfree(hdrs);
 		return NULL;
 	}
 
 	for (i = 0; i < num_hdrs; i++) {
-		hdrs[i] = dma_pool_zalloc(pool, GFP_KERNEL, &hdr_daddrs[i]);
+		hdrs[i] = rds_dma_hdr_alloc(dev->dev, &hdr_daddrs[i], dir);
 		if (!hdrs[i]) {
-			rds_dma_hdrs_free(pool, hdrs, hdr_daddrs, i);
+			rds_dma_hdrs_free(dev, hdrs, hdr_daddrs, i, dir);
 			return NULL;
 		}
 	}
@@ -485,24 +529,6 @@ struct rds_header **rds_dma_hdrs_alloc(struct ib_device *ibdev,
 	return hdrs;
 }
 
-/* Free the DMA memory used to store struct rds_header.
- *
- * @pool: the DMA memory pool
- * @hdrs: pointer to the array storing DMA memory pointers
- * @dma_addrs: pointer to the array storing DMA addresses
- * @num_hdars: number of headers to free.
- */
-void rds_dma_hdrs_free(struct dma_pool *pool, struct rds_header **hdrs,
-		       dma_addr_t *dma_addrs, u32 num_hdrs)
-{
-	u32 i;
-
-	for (i = 0; i < num_hdrs; i++)
-		dma_pool_free(pool, hdrs[i], dma_addrs[i]);
-	kvfree(hdrs);
-	kvfree(dma_addrs);
-}
-
 /*
  * This needs to be very careful to not leave IS_ERR pointers around for
  * cleanup to trip over.
@@ -516,7 +542,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	struct rds_ib_device *rds_ibdev;
 	unsigned long max_wrs;
 	int ret, fr_queue_space;
-	struct dma_pool *pool;
 
 	/*
 	 * It's normal to see a null device if an incoming connection races
@@ -612,25 +637,26 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 		goto recv_cq_out;
 	}
 
-	pool = rds_ibdev->rid_hdrs_pool;
-	ic->i_send_hdrs = rds_dma_hdrs_alloc(dev, pool, &ic->i_send_hdrs_dma,
-					     ic->i_send_ring.w_nr);
+	ic->i_send_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_send_hdrs_dma,
+					     ic->i_send_ring.w_nr,
+					     DMA_TO_DEVICE);
 	if (!ic->i_send_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("DMA send hdrs alloc failed\n");
 		goto qp_out;
 	}
 
-	ic->i_recv_hdrs = rds_dma_hdrs_alloc(dev, pool, &ic->i_recv_hdrs_dma,
-					     ic->i_recv_ring.w_nr);
+	ic->i_recv_hdrs = rds_dma_hdrs_alloc(rds_ibdev, &ic->i_recv_hdrs_dma,
+					     ic->i_recv_ring.w_nr,
+					     DMA_FROM_DEVICE);
 	if (!ic->i_recv_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("DMA recv hdrs alloc failed\n");
 		goto send_hdrs_dma_out;
 	}
 
-	ic->i_ack = dma_pool_zalloc(pool, GFP_KERNEL,
-				    &ic->i_ack_dma);
+	ic->i_ack = rds_dma_hdr_alloc(rds_ibdev->dev, &ic->i_ack_dma,
+				      DMA_TO_DEVICE);
 	if (!ic->i_ack) {
 		ret = -ENOMEM;
 		rdsdebug("DMA ack header alloc failed\n");
@@ -666,18 +692,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	vfree(ic->i_sends);
 
 ack_dma_out:
-	dma_pool_free(pool, ic->i_ack, ic->i_ack_dma);
+	rds_dma_hdr_free(rds_ibdev->dev, ic->i_ack, ic->i_ack_dma,
+			 DMA_TO_DEVICE);
 	ic->i_ack = NULL;
 
 recv_hdrs_dma_out:
-	rds_dma_hdrs_free(pool, ic->i_recv_hdrs, ic->i_recv_hdrs_dma,
-			  ic->i_recv_ring.w_nr);
+	rds_dma_hdrs_free(rds_ibdev, ic->i_recv_hdrs, ic->i_recv_hdrs_dma,
+			  ic->i_recv_ring.w_nr, DMA_FROM_DEVICE);
 	ic->i_recv_hdrs = NULL;
 	ic->i_recv_hdrs_dma = NULL;
 
 send_hdrs_dma_out:
-	rds_dma_hdrs_free(pool, ic->i_send_hdrs, ic->i_send_hdrs_dma,
-			  ic->i_send_ring.w_nr);
+	rds_dma_hdrs_free(rds_ibdev, ic->i_send_hdrs, ic->i_send_hdrs_dma,
+			  ic->i_send_ring.w_nr, DMA_TO_DEVICE);
 	ic->i_send_hdrs = NULL;
 	ic->i_send_hdrs_dma = NULL;
 
@@ -1110,29 +1137,30 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		}
 
 		if (ic->rds_ibdev) {
-			struct dma_pool *pool;
-
-			pool = ic->rds_ibdev->rid_hdrs_pool;
-
 			/* then free the resources that ib callbacks use */
 			if (ic->i_send_hdrs) {
-				rds_dma_hdrs_free(pool, ic->i_send_hdrs,
+				rds_dma_hdrs_free(ic->rds_ibdev,
+						  ic->i_send_hdrs,
 						  ic->i_send_hdrs_dma,
-						  ic->i_send_ring.w_nr);
+						  ic->i_send_ring.w_nr,
+						  DMA_TO_DEVICE);
 				ic->i_send_hdrs = NULL;
 				ic->i_send_hdrs_dma = NULL;
 			}
 
 			if (ic->i_recv_hdrs) {
-				rds_dma_hdrs_free(pool, ic->i_recv_hdrs,
+				rds_dma_hdrs_free(ic->rds_ibdev,
+						  ic->i_recv_hdrs,
 						  ic->i_recv_hdrs_dma,
-						  ic->i_recv_ring.w_nr);
+						  ic->i_recv_ring.w_nr,
+						  DMA_FROM_DEVICE);
 				ic->i_recv_hdrs = NULL;
 				ic->i_recv_hdrs_dma = NULL;
 			}
 
 			if (ic->i_ack) {
-				dma_pool_free(pool, ic->i_ack, ic->i_ack_dma);
+				rds_dma_hdr_free(ic->rds_ibdev->dev, ic->i_ack,
+						 ic->i_ack_dma, DMA_TO_DEVICE);
 				ic->i_ack = NULL;
 			}
 		} else {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 3cffcec5fb371..6fdedd9dbbc28 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -662,10 +662,16 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
 	seq = rds_ib_get_ack(ic);
 
 	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
+
+	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
+				   sizeof(*hdr), DMA_TO_DEVICE);
 	rds_message_populate_header(hdr, 0, 0, 0);
 	hdr->h_ack = cpu_to_be64(seq);
 	hdr->h_credit = adv_credits;
 	rds_message_make_checksum(hdr);
+	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
+				      sizeof(*hdr), DMA_TO_DEVICE);
+
 	ic->i_ack_queued = jiffies;
 
 	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
@@ -845,6 +851,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_incoming *ibinc = ic->i_ibinc;
 	struct rds_header *ihdr, *hdr;
+	dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
 
 	/* XXX shut down the connection if port 0,0 are seen? */
 
@@ -863,6 +870,8 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 
 	ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
 
+	ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
+				   sizeof(*ihdr), DMA_FROM_DEVICE);
 	/* Validate the checksum. */
 	if (!rds_message_verify_checksum(ihdr)) {
 		rds_ib_conn_error(conn, "incoming message "
@@ -870,7 +879,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 		       "forcing a reconnect\n",
 		       &conn->c_faddr);
 		rds_stats_inc(s_recv_drop_bad_checksum);
-		return;
+		goto done;
 	}
 
 	/* Process the ACK sequence which comes with every packet */
@@ -899,7 +908,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 		 */
 		rds_ib_frag_free(ic, recv->r_frag);
 		recv->r_frag = NULL;
-		return;
+		goto done;
 	}
 
 	/*
@@ -933,7 +942,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 		    hdr->h_dport != ihdr->h_dport) {
 			rds_ib_conn_error(conn,
 				"fragment header mismatch; forcing reconnect\n");
-			return;
+			goto done;
 		}
 	}
 
@@ -965,6 +974,9 @@ static void rds_ib_process_recv(struct rds_connection *conn,
 
 		rds_inc_put(&ibinc->ii_inc);
 	}
+done:
+	ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
+				      sizeof(*ihdr), DMA_FROM_DEVICE);
 }
 
 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index dfe778220657a..92b4a8689aae7 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -638,6 +638,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		send->s_sge[0].length = sizeof(struct rds_header);
 		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
 
+		ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
+					   ic->i_send_hdrs_dma[pos],
+					   sizeof(struct rds_header),
+					   DMA_TO_DEVICE);
 		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
 		       sizeof(struct rds_header));
 
@@ -688,6 +692,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 			adv_credits = 0;
 			rds_ib_stats_inc(s_ib_tx_credit_updates);
 		}
+		ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
+					      ic->i_send_hdrs_dma[pos],
+					      sizeof(struct rds_header),
+					      DMA_TO_DEVICE);
 
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
-- 
2.25.1

