linux-rdma.vger.kernel.org archive mirror
From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	RDMA mailing list <linux-rdma@vger.kernel.org>,
	Daniel Jurgens <danielj@mellanox.com>,
	Erez Shitrit <erezsh@mellanox.com>,
	Jason Gunthorpe <jgg@ziepe.ca>,
	Maor Gottlieb <maorg@mellanox.com>,
	Michael Guralnik <michaelgur@mellanox.com>,
	Moni Shoua <monis@mellanox.com>,
	Parav Pandit <parav@mellanox.com>,
	Sean Hefty <sean.hefty@intel.com>,
	Valentine Fatiev <valentinef@mellanox.com>,
	Yishai Hadas <yishaih@mellanox.com>,
	Yonatan Cohen <yonatanc@mellanox.com>,
	Zhu Yanjun <yanjunz@mellanox.com>
Subject: [PATCH rdma-rc 6/9] IB/mlx5: Fix async events cleanup flows
Date: Wed, 12 Feb 2020 09:26:32 +0200	[thread overview]
Message-ID: <20200212072635.682689-7-leon@kernel.org> (raw)
In-Reply-To: <20200212072635.682689-1-leon@kernel.org>

From: Yishai Hadas <yishaih@mellanox.com>

Fix the async event flows to prevent a race between the event read
APIs and the destroy uobj API.

In both the async command and async event flows, delete the event entry
from the list before de-allocating its memory, and fix the async command
flow to check 'is_destroyed' properly under the lock.

This prevents accessing an entry after it has been de-allocated.
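
As an illustration, a minimal userspace sketch of the locking pattern
described above (hypothetical names; a pthread mutex stands in for the
kernel spinlock, so this is not the mlx5 code itself): the destroy path
unlinks and frees pending entries while holding the lock, and the reader
re-checks the destroyed flag under the same lock before touching the list.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct entry {
		struct entry *next;
	};

	struct queue {
		pthread_mutex_t lock;
		struct entry *head;
		bool is_destroyed;
	};

	/* Reader side: bail out under the lock if the queue is being
	 * destroyed, and unlink the entry before anyone can free it.
	 */
	static struct entry *queue_pop(struct queue *q)
	{
		struct entry *e = NULL;

		pthread_mutex_lock(&q->lock);
		if (!q->is_destroyed && q->head) {
			e = q->head;
			q->head = e->next;
		}
		pthread_mutex_unlock(&q->lock);
		return e;
	}

	/* Destroy side: mark the queue destroyed, then unlink and free
	 * every pending entry while still holding the lock, so a
	 * concurrent reader can never observe a freed entry.
	 */
	static void queue_destroy(struct queue *q)
	{
		pthread_mutex_lock(&q->lock);
		q->is_destroyed = true;
		while (q->head) {
			struct entry *e = q->head;

			q->head = e->next;
			free(e);
		}
		pthread_mutex_unlock(&q->lock);
	}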

Fixes: f7c8416ccea5 ("RDMA/core: Simplify destruction of FD uobjects")
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/devx.c | 51 +++++++++++++++++--------------
 1 file changed, 28 insertions(+), 23 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d7efc9f6daf0..46e1ab771f10 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2319,14 +2319,12 @@ static int deliver_event(struct devx_event_subscription *event_sub,
 
 	if (ev_file->omit_data) {
 		spin_lock_irqsave(&ev_file->lock, flags);
-		if (!list_empty(&event_sub->event_list)) {
+		if (!list_empty(&event_sub->event_list) ||
+		    ev_file->is_destroyed) {
 			spin_unlock_irqrestore(&ev_file->lock, flags);
 			return 0;
 		}
 
-		/* is_destroyed is ignored here because we don't have any memory
-		 * allocation to clean up for the omit_data case
-		 */
 		list_add_tail(&event_sub->event_list, &ev_file->event_list);
 		spin_unlock_irqrestore(&ev_file->lock, flags);
 		wake_up_interruptible(&ev_file->poll_wait);
@@ -2473,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
 			return -ERESTARTSYS;
 		}
 
-		if (list_empty(&ev_queue->event_list) &&
-		    ev_queue->is_destroyed)
-			return -EIO;
-
 		spin_lock_irq(&ev_queue->lock);
+		if (ev_queue->is_destroyed) {
+			spin_unlock_irq(&ev_queue->lock);
+			return -EIO;
+		}
 	}
 
 	event = list_entry(ev_queue->event_list.next,
@@ -2551,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
 		return -EOVERFLOW;
 	}
 
-	if (ev_file->is_destroyed) {
-		spin_unlock_irq(&ev_file->lock);
-		return -EIO;
-	}
 
 	while (list_empty(&ev_file->event_list)) {
 		spin_unlock_irq(&ev_file->lock);
@@ -2667,8 +2661,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
 
 	spin_lock_irq(&comp_ev_file->ev_queue.lock);
 	list_for_each_entry_safe(entry, tmp,
-				 &comp_ev_file->ev_queue.event_list, list)
+				 &comp_ev_file->ev_queue.event_list, list) {
+		list_del(&entry->list);
 		kvfree(entry);
+	}
 	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
 	return 0;
 };
@@ -2680,11 +2676,29 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
 		container_of(uobj, struct devx_async_event_file,
 			     uobj);
 	struct devx_event_subscription *event_sub, *event_sub_tmp;
-	struct devx_async_event_data *entry, *tmp;
 	struct mlx5_ib_dev *dev = ev_file->dev;
 
 	spin_lock_irq(&ev_file->lock);
 	ev_file->is_destroyed = 1;
+
+	/* free the pending events allocation */
+	if (ev_file->omit_data) {
+		struct devx_event_subscription *event_sub, *tmp;
+
+		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+					 event_list)
+			list_del_init(&event_sub->event_list);
+
+	} else {
+		struct devx_async_event_data *entry, *tmp;
+
+		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+					 list) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
+	}
+
 	spin_unlock_irq(&ev_file->lock);
 	wake_up_interruptible(&ev_file->poll_wait);
 
@@ -2699,15 +2713,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
 	}
 	mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
-	/* free the pending events allocation */
-	if (!ev_file->omit_data) {
-		spin_lock_irq(&ev_file->lock);
-		list_for_each_entry_safe(entry, tmp,
-					 &ev_file->event_list, list)
-			kfree(entry); /* read can't come any more */
-		spin_unlock_irq(&ev_file->lock);
-	}
-
 	put_device(&dev->ib_dev.dev);
 	return 0;
 };
-- 
2.24.1


Thread overview: 25+ messages
2020-02-12  7:26 [PATCH rdma-rc 0/9] Fixes for v5.6 Leon Romanovsky
2020-02-12  7:26 ` [PATCH rdma-rc 1/9] RDMA/ucma: Mask QPN to be 24 bits according to IBTA Leon Romanovsky
2020-02-12  7:26 ` [PATCH rdma-rc 2/9] RDMA/core: Fix protection fault in get_pkey_idx_qp_list Leon Romanovsky
2020-02-12  8:01   ` Leon Romanovsky
2020-02-12  8:06   ` Leon Romanovsky
2020-02-12  7:26 ` [PATCH rdma-rc 3/9] Revert "RDMA/cma: Simplify rdma_resolve_addr() error flow" Leon Romanovsky
2020-02-13 13:30   ` Jason Gunthorpe
2020-02-14  3:11     ` Parav Pandit
2020-02-14 14:08       ` Jason Gunthorpe
2020-02-14 14:48         ` Parav Pandit
2020-02-19 20:40   ` Jason Gunthorpe
2020-02-12  7:26 ` [PATCH rdma-rc 4/9] IB/ipoib: Fix double free of skb in case of multicast traffic in CM mode Leon Romanovsky
2020-02-13 15:37   ` Jason Gunthorpe
2020-02-13 18:10     ` Leon Romanovsky
2020-02-13 18:26       ` Jason Gunthorpe
2020-02-13 18:36         ` Leon Romanovsky
2020-02-13 19:09           ` Jason Gunthorpe
2020-02-12  7:26 ` [PATCH rdma-rc 5/9] RDMA/core: Add missing list deletion on freeing event queue Leon Romanovsky
2020-02-12  7:26 ` Leon Romanovsky [this message]
2020-02-12  7:26 ` [PATCH rdma-rc 7/9] RDMA/rxe: Fix soft lockup problem due to using tasklets in softirq Leon Romanovsky
2020-02-12  7:26 ` [PATCH rdma-rc 8/9] IB/umad: Fix kernel crash while unloading ib_umad Leon Romanovsky
2020-02-13 14:28   ` Jason Gunthorpe
2020-02-13 18:03     ` Leon Romanovsky
2020-02-12  7:26 ` [PATCH rdma-rc 9/9] RDMA/mlx5: Prevent overflow in mmap offset calculations Leon Romanovsky
2020-02-13 18:03 ` [PATCH rdma-rc 0/9] Fixes for v5.6 Jason Gunthorpe
