* [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock
@ 2016-09-16 13:12 Jan Kara
  2016-09-16 15:55 ` Guenter Roeck
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Kara @ 2016-09-16 13:12 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-fsdevel, Miklos Szeredi, Lino Sanfilippo, Eric Paris,
	Guenter Roeck, Jan Kara

notification_mutex is used to protect the list of pending events. As
such there's no reason to use a sleeping lock for it. Convert it to a
spinlock.

Reviewed-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/notify/fanotify/fanotify_user.c | 27 ++++++++++++++-------------
 fs/notify/group.c                  |  6 +++---
 fs/notify/inotify/inotify_user.c   | 16 ++++++++--------
 fs/notify/notification.c           | 27 +++++++++++++++------------
 include/linux/fsnotify_backend.h   |  2 +-
 5 files changed, 41 insertions(+), 37 deletions(-)

This is an updated version of the patch; it fixes the BUG_ON triggering on UP
kernels.
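
On UP builds without spinlock debugging the lock has no state to inspect
and spin_is_locked() always returns 0, so an unguarded assertion would
fire even with the lock held. A minimal sketch of the broken and the
guarded form (identifiers as in the diff below):

	/* Always triggers on !CONFIG_SMP kernels built without spinlock
	 * debugging, even though the caller does hold the lock: */
	BUG_ON(!spin_is_locked(&group->notification_lock));

	/* Guarded form used in this version of the patch: skip the check
	 * where it is not reliable. */
	BUG_ON(IS_ENABLED(CONFIG_SMP) &&
	       !spin_is_locked(&group->notification_lock));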

diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 46d135c4988f..80091a5dc8c0 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -49,12 +49,13 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
  * enough to fit in "count". Return an error pointer if the count
  * is not large enough.
  *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
  */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 					    size_t count)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(IS_ENABLED(CONFIG_SMP) &&
+	       !spin_is_locked(&group->notification_lock));
 
 	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 
@@ -64,7 +65,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 	if (FAN_EVENT_METADATA_LEN > count)
 		return ERR_PTR(-EINVAL);
 
-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
 	 * same event we peeked above */
 	return fsnotify_remove_first_event(group);
 }
@@ -244,10 +245,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
 	int ret = 0;
 
 	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	if (!fsnotify_notify_queue_is_empty(group))
 		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	return ret;
 }
@@ -268,9 +269,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
 	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 
 		if (IS_ERR(kevent)) {
 			ret = PTR_ERR(kevent);
@@ -387,17 +388,17 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 	 * dequeue them and set the response. They will be freed once the
 	 * response is consumed and fanotify_get_response() returns.
 	 */
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		fsn_event = fsnotify_remove_first_event(group);
 		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			fsnotify_destroy_event(group, fsn_event);
-			mutex_lock(&group->notification_mutex);
+			spin_lock(&group->notification_lock);
 		} else
 			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
 	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	/* Response for all permission events it set, wakeup waiters */
 	wake_up(&group->fanotify_data.access_waitq);
@@ -423,10 +424,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 
 	switch (cmd) {
 	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		list_for_each_entry(fsn_event, &group->notification_list, list)
 			send_len += FAN_EVENT_METADATA_LEN;
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		ret = put_user(send_len, (int __user *) p);
 		break;
 	}
diff --git a/fs/notify/group.c b/fs/notify/group.c
index b47f7cfdcaa4..fbe3cbebec16 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_group_stop_queueing(struct fsnotify_group *group)
 {
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	group->shutdown = true;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }
 
 /*
@@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 	atomic_set(&group->refcnt, 1);
 	atomic_set(&group->num_marks, 0);
 
-	mutex_init(&group->notification_mutex);
+	spin_lock_init(&group->notification_lock);
 	INIT_LIST_HEAD(&group->notification_list);
 	init_waitqueue_head(&group->notification_waitq);
 	group->max_events = UINT_MAX;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index b8d08d0d0a4d..69d1ea3d292a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
 	int ret = 0;
 
 	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	if (!fsnotify_notify_queue_is_empty(group))
 		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	return ret;
 }
@@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
  * enough to fit in "count". Return an error pointer if
  * not large enough.
  *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
  */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 					    size_t count)
@@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 	if (event_size > count)
 		return ERR_PTR(-EINVAL);
 
-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
 	 * same event we peeked above */
 	fsnotify_remove_first_event(group);
 
@@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 
 	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 
 		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
 
@@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
 
 	switch (cmd) {
 	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		list_for_each_entry(fsn_event, &group->notification_list,
 				    list) {
 			send_len += sizeof(struct inotify_event);
 			send_len += round_event_name_len(fsn_event);
 		}
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		ret = put_user(send_len, (int __user *) p);
 		break;
 	}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 7d563dea52a4..8a7a8cd041e8 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -63,7 +63,8 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
 /* return true if the notify queue is empty, false otherwise */
 bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(IS_ENABLED(CONFIG_SMP) &&
+	       !spin_is_locked(&group->notification_lock));
 	return list_empty(&group->notification_list) ? true : false;
 }
 
@@ -95,10 +96,10 @@ int fsnotify_add_event(struct fsnotify_group *group,
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 
 	if (group->shutdown) {
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		return 2;
 	}
 
@@ -106,7 +107,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 		ret = 2;
 		/* Queue overflow event only if it isn't already queued */
 		if (!list_empty(&group->overflow_event->list)) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			return ret;
 		}
 		event = group->overflow_event;
@@ -116,7 +117,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 	if (!list_empty(list) && merge) {
 		ret = merge(list, event);
 		if (ret) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			return ret;
 		}
 	}
@@ -124,7 +125,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 queue:
 	group->q_len++;
 	list_add_tail(&event->list, list);
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	wake_up(&group->notification_waitq);
 	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
@@ -139,7 +140,8 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
 
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(IS_ENABLED(CONFIG_SMP) &&
+	       !spin_is_locked(&group->notification_lock));
 
 	pr_debug("%s: group=%p\n", __func__, group);
 
@@ -161,7 +163,8 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
  */
 struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(IS_ENABLED(CONFIG_SMP) &&
+	       !spin_is_locked(&group->notification_lock));
 
 	return list_first_entry(&group->notification_list,
 				struct fsnotify_event, list);
@@ -175,14 +178,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
 
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		event = fsnotify_remove_first_event(group);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		fsnotify_destroy_event(group, event);
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }
 
 /*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7268ed076be8..0713e873b1c9 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -135,7 +135,7 @@ struct fsnotify_group {
 	const struct fsnotify_ops *ops;	/* how this group handles things */
 
 	/* needed to send notification to userspace */
-	struct mutex notification_mutex;	/* protect the notification_list */
+	spinlock_t notification_lock;		/* protect the notification_list */
 	struct list_head notification_list;	/* list of event_holder this group needs to send to userspace */
 	wait_queue_head_t notification_waitq;	/* read() on the notification file blocks on this waitq */
 	unsigned int q_len;			/* events on the queue */
-- 
2.6.6



* Re: [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock
  2016-09-16 13:12 [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock Jan Kara
@ 2016-09-16 15:55 ` Guenter Roeck
  0 siblings, 0 replies; 4+ messages in thread
From: Guenter Roeck @ 2016-09-16 15:55 UTC (permalink / raw)
  To: Jan Kara
  Cc: Andrew Morton, linux-fsdevel, Miklos Szeredi, Lino Sanfilippo,
	Eric Paris

On Fri, Sep 16, 2016 at 03:12:47PM +0200, Jan Kara wrote:
> notification_mutex is used to protect the list of pending events. As
> such there's no reason to use a sleeping lock for it. Convert it to a
> spinlock.
> 
> Reviewed-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
> Signed-off-by: Jan Kara <jack@suse.cz>

Tested-by: Guenter Roeck <linux@roeck-us.net>



* Re: [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock
  2016-09-13 20:15 ` [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock Jan Kara
@ 2016-09-14 17:13   ` Lino Sanfilippo
  0 siblings, 0 replies; 4+ messages in thread
From: Lino Sanfilippo @ 2016-09-14 17:13 UTC (permalink / raw)
  To: Jan Kara, Andrew Morton
  Cc: linux-fsdevel, Miklos Szeredi, Eric Paris, Al Viro

On 13.09.2016 22:15, Jan Kara wrote:
> notification_mutex is used to protect the list of pending events. As
> such there's no reason to use a sleeping lock for it. Convert it to a
> spinlock.
> 
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
>  fs/notify/fanotify/fanotify_user.c | 26 +++++++++++++-------------
>  fs/notify/group.c                  |  6 +++---
>  fs/notify/inotify/inotify_user.c   | 16 ++++++++--------
>  fs/notify/notification.c           | 24 ++++++++++++------------
>  include/linux/fsnotify_backend.h   |  2 +-
>  5 files changed, 37 insertions(+), 37 deletions(-)
> 
> diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
> index 46d135c4988f..e6f3fe9bb2ed 100644
> --- a/fs/notify/fanotify/fanotify_user.c
> +++ b/fs/notify/fanotify/fanotify_user.c
> @@ -49,12 +49,12 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
>   * enough to fit in "count". Return an error pointer if the count
>   * is not large enough.
>   *
> - * Called with the group->notification_mutex held.
> + * Called with the group->notification_lock held.
>   */
>  static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>  					    size_t count)
>  {
> -	BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +	BUG_ON(!spin_is_locked(&group->notification_lock));
>  
>  	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
>  
> @@ -64,7 +64,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>  	if (FAN_EVENT_METADATA_LEN > count)
>  		return ERR_PTR(-EINVAL);
>  
> -	/* held the notification_mutex the whole time, so this is the
> +	/* held the notification_lock the whole time, so this is the
>  	 * same event we peeked above */
>  	return fsnotify_remove_first_event(group);
>  }

Just a nitpick: the comment no longer reflects the current code (we don't
peek any more), so it could be removed completely.
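
The tail of get_one_event() would then simply read (same logic as in the
hunk above, just with the stale comment dropped):

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	return fsnotify_remove_first_event(group);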

> @@ -244,10 +244,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
>  	int ret = 0;
>  
>  	poll_wait(file, &group->notification_waitq, wait);
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  	if (!fsnotify_notify_queue_is_empty(group))
>  		ret = POLLIN | POLLRDNORM;
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  
>  	return ret;
>  }
> @@ -268,9 +268,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
>  
>  	add_wait_queue(&group->notification_waitq, &wait);
>  	while (1) {
> -		mutex_lock(&group->notification_mutex);
> +		spin_lock(&group->notification_lock);
>  		kevent = get_one_event(group, count);
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  
>  		if (IS_ERR(kevent)) {
>  			ret = PTR_ERR(kevent);
> @@ -387,17 +387,17 @@ static int fanotify_release(struct inode *ignored, struct file *file)
>  	 * dequeue them and set the response. They will be freed once the
>  	 * response is consumed and fanotify_get_response() returns.
>  	 */
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  	while (!fsnotify_notify_queue_is_empty(group)) {
>  		fsn_event = fsnotify_remove_first_event(group);
>  		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
> -			mutex_unlock(&group->notification_mutex);
> +			spin_unlock(&group->notification_lock);
>  			fsnotify_destroy_event(group, fsn_event);
> -			mutex_lock(&group->notification_mutex);
> +			spin_lock(&group->notification_lock);
>  		} else
>  			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
>  	}
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  
>  	/* Response for all permission events it set, wakeup waiters */
>  	wake_up(&group->fanotify_data.access_waitq);
> @@ -423,10 +423,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
>  
>  	switch (cmd) {
>  	case FIONREAD:
> -		mutex_lock(&group->notification_mutex);
> +		spin_lock(&group->notification_lock);
>  		list_for_each_entry(fsn_event, &group->notification_list, list)
>  			send_len += FAN_EVENT_METADATA_LEN;
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  		ret = put_user(send_len, (int __user *) p);
>  		break;
>  	}
> diff --git a/fs/notify/group.c b/fs/notify/group.c
> index b47f7cfdcaa4..fbe3cbebec16 100644
> --- a/fs/notify/group.c
> +++ b/fs/notify/group.c
> @@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
>   */
>  void fsnotify_group_stop_queueing(struct fsnotify_group *group)
>  {
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  	group->shutdown = true;
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  }
>  
>  /*
> @@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
>  	atomic_set(&group->refcnt, 1);
>  	atomic_set(&group->num_marks, 0);
>  
> -	mutex_init(&group->notification_mutex);
> +	spin_lock_init(&group->notification_lock);
>  	INIT_LIST_HEAD(&group->notification_list);
>  	init_waitqueue_head(&group->notification_waitq);
>  	group->max_events = UINT_MAX;
> diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
> index b8d08d0d0a4d..69d1ea3d292a 100644
> --- a/fs/notify/inotify/inotify_user.c
> +++ b/fs/notify/inotify/inotify_user.c
> @@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
>  	int ret = 0;
>  
>  	poll_wait(file, &group->notification_waitq, wait);
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  	if (!fsnotify_notify_queue_is_empty(group))
>  		ret = POLLIN | POLLRDNORM;
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  
>  	return ret;
>  }
> @@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
>   * enough to fit in "count". Return an error pointer if
>   * not large enough.
>   *
> - * Called with the group->notification_mutex held.
> + * Called with the group->notification_lock held.
>   */
>  static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>  					    size_t count)
> @@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>  	if (event_size > count)
>  		return ERR_PTR(-EINVAL);
>  
> -	/* held the notification_mutex the whole time, so this is the
> +	/* held the notification_lock the whole time, so this is the
>  	 * same event we peeked above */
>  	fsnotify_remove_first_event(group);
>  
> @@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
>  
>  	add_wait_queue(&group->notification_waitq, &wait);
>  	while (1) {
> -		mutex_lock(&group->notification_mutex);
> +		spin_lock(&group->notification_lock);
>  		kevent = get_one_event(group, count);
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  
>  		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
>  
> @@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
>  
>  	switch (cmd) {
>  	case FIONREAD:
> -		mutex_lock(&group->notification_mutex);
> +		spin_lock(&group->notification_lock);
>  		list_for_each_entry(fsn_event, &group->notification_list,
>  				    list) {
>  			send_len += sizeof(struct inotify_event);
>  			send_len += round_event_name_len(fsn_event);
>  		}
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  		ret = put_user(send_len, (int __user *) p);
>  		break;
>  	}
> diff --git a/fs/notify/notification.c b/fs/notify/notification.c
> index 7d563dea52a4..070d255b24a2 100644
> --- a/fs/notify/notification.c
> +++ b/fs/notify/notification.c
> @@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
>  /* return true if the notify queue is empty, false otherwise */
>  bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
>  {
> -	BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +	BUG_ON(!spin_is_locked(&group->notification_lock));
>  	return list_empty(&group->notification_list) ? true : false;
>  }
>  
> @@ -95,10 +95,10 @@ int fsnotify_add_event(struct fsnotify_group *group,
>  
>  	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
>  
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  
>  	if (group->shutdown) {
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  		return 2;
>  	}
>  
> @@ -106,7 +106,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>  		ret = 2;
>  		/* Queue overflow event only if it isn't already queued */
>  		if (!list_empty(&group->overflow_event->list)) {
> -			mutex_unlock(&group->notification_mutex);
> +			spin_unlock(&group->notification_lock);
>  			return ret;
>  		}
>  		event = group->overflow_event;
> @@ -116,7 +116,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>  	if (!list_empty(list) && merge) {
>  		ret = merge(list, event);
>  		if (ret) {
> -			mutex_unlock(&group->notification_mutex);
> +			spin_unlock(&group->notification_lock);
>  			return ret;
>  		}
>  	}
> @@ -124,7 +124,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>  queue:
>  	group->q_len++;
>  	list_add_tail(&event->list, list);
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  
>  	wake_up(&group->notification_waitq);
>  	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
> @@ -139,7 +139,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
>  {
>  	struct fsnotify_event *event;
>  
> -	BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +	BUG_ON(!spin_is_locked(&group->notification_lock));
>  
>  	pr_debug("%s: group=%p\n", __func__, group);
>  
> @@ -161,7 +161,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
>   */
>  struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
>  {
> -	BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +	BUG_ON(!spin_is_locked(&group->notification_lock));
>  
>  	return list_first_entry(&group->notification_list,
>  				struct fsnotify_event, list);
> @@ -175,14 +175,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
>  {
>  	struct fsnotify_event *event;
>  
> -	mutex_lock(&group->notification_mutex);
> +	spin_lock(&group->notification_lock);
>  	while (!fsnotify_notify_queue_is_empty(group)) {
>  		event = fsnotify_remove_first_event(group);
> -		mutex_unlock(&group->notification_mutex);
> +		spin_unlock(&group->notification_lock);
>  		fsnotify_destroy_event(group, event);
> -		mutex_lock(&group->notification_mutex);
> +		spin_lock(&group->notification_lock);
>  	}
> -	mutex_unlock(&group->notification_mutex);
> +	spin_unlock(&group->notification_lock);
>  }
>  
>  /*
> diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
> index 7268ed076be8..0713e873b1c9 100644
> --- a/include/linux/fsnotify_backend.h
> +++ b/include/linux/fsnotify_backend.h
> @@ -135,7 +135,7 @@ struct fsnotify_group {
>  	const struct fsnotify_ops *ops;	/* how this group handles things */
>  
>  	/* needed to send notification to userspace */
> -	struct mutex notification_mutex;	/* protect the notification_list */
> +	spinlock_t notification_lock;		/* protect the notification_list */
>  	struct list_head notification_list;	/* list of event_holder this group needs to send to userspace */
>  	wait_queue_head_t notification_waitq;	/* read() on the notification file blocks on this waitq */
>  	unsigned int q_len;			/* events on the queue */
> 

Reviewed-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>


* [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock
  2016-09-13 20:15 [PATCH 0/6 v2] fsnotify: Fix list corruption for permission events Jan Kara
@ 2016-09-13 20:15 ` Jan Kara
  2016-09-14 17:13   ` Lino Sanfilippo
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Kara @ 2016-09-13 20:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-fsdevel, Miklos Szeredi, Lino Sanfilippo, Eric Paris,
	Al Viro, Jan Kara

notification_mutex is used to protect the list of pending events. As
such there's no reason to use a sleeping lock for it. Convert it to a
spinlock.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 fs/notify/fanotify/fanotify_user.c | 26 +++++++++++++-------------
 fs/notify/group.c                  |  6 +++---
 fs/notify/inotify/inotify_user.c   | 16 ++++++++--------
 fs/notify/notification.c           | 24 ++++++++++++------------
 include/linux/fsnotify_backend.h   |  2 +-
 5 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 46d135c4988f..e6f3fe9bb2ed 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -49,12 +49,12 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
  * enough to fit in "count". Return an error pointer if the count
  * is not large enough.
  *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
  */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 					    size_t count)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(!spin_is_locked(&group->notification_lock));
 
 	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 
@@ -64,7 +64,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 	if (FAN_EVENT_METADATA_LEN > count)
 		return ERR_PTR(-EINVAL);
 
-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
 	 * same event we peeked above */
 	return fsnotify_remove_first_event(group);
 }
@@ -244,10 +244,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
 	int ret = 0;
 
 	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	if (!fsnotify_notify_queue_is_empty(group))
 		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	return ret;
 }
@@ -268,9 +268,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
 	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 
 		if (IS_ERR(kevent)) {
 			ret = PTR_ERR(kevent);
@@ -387,17 +387,17 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 	 * dequeue them and set the response. They will be freed once the
 	 * response is consumed and fanotify_get_response() returns.
 	 */
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		fsn_event = fsnotify_remove_first_event(group);
 		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			fsnotify_destroy_event(group, fsn_event);
-			mutex_lock(&group->notification_mutex);
+			spin_lock(&group->notification_lock);
 		} else
 			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
 	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	/* Response for all permission events it set, wakeup waiters */
 	wake_up(&group->fanotify_data.access_waitq);
@@ -423,10 +423,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 
 	switch (cmd) {
 	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		list_for_each_entry(fsn_event, &group->notification_list, list)
 			send_len += FAN_EVENT_METADATA_LEN;
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		ret = put_user(send_len, (int __user *) p);
 		break;
 	}
diff --git a/fs/notify/group.c b/fs/notify/group.c
index b47f7cfdcaa4..fbe3cbebec16 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_group_stop_queueing(struct fsnotify_group *group)
 {
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	group->shutdown = true;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }
 
 /*
@@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 	atomic_set(&group->refcnt, 1);
 	atomic_set(&group->num_marks, 0);
 
-	mutex_init(&group->notification_mutex);
+	spin_lock_init(&group->notification_lock);
 	INIT_LIST_HEAD(&group->notification_list);
 	init_waitqueue_head(&group->notification_waitq);
 	group->max_events = UINT_MAX;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index b8d08d0d0a4d..69d1ea3d292a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
 	int ret = 0;
 
 	poll_wait(file, &group->notification_waitq, wait);
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	if (!fsnotify_notify_queue_is_empty(group))
 		ret = POLLIN | POLLRDNORM;
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	return ret;
 }
@@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
  * enough to fit in "count". Return an error pointer if
  * not large enough.
  *
- * Called with the group->notification_mutex held.
+ * Called with the group->notification_lock held.
  */
 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 					    size_t count)
@@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 	if (event_size > count)
 		return ERR_PTR(-EINVAL);
 
-	/* held the notification_mutex the whole time, so this is the
+	/* held the notification_lock the whole time, so this is the
 	 * same event we peeked above */
 	fsnotify_remove_first_event(group);
 
@@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
 
 	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		kevent = get_one_event(group, count);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 
 		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
 
@@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
 
 	switch (cmd) {
 	case FIONREAD:
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 		list_for_each_entry(fsn_event, &group->notification_list,
 				    list) {
 			send_len += sizeof(struct inotify_event);
 			send_len += round_event_name_len(fsn_event);
 		}
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		ret = put_user(send_len, (int __user *) p);
 		break;
 	}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 7d563dea52a4..070d255b24a2 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
 /* return true if the notify queue is empty, false otherwise */
 bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(!spin_is_locked(&group->notification_lock));
 	return list_empty(&group->notification_list) ? true : false;
 }
 
@@ -95,10 +95,10 @@ int fsnotify_add_event(struct fsnotify_group *group,
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 
 	if (group->shutdown) {
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		return 2;
 	}
 
@@ -106,7 +106,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 		ret = 2;
 		/* Queue overflow event only if it isn't already queued */
 		if (!list_empty(&group->overflow_event->list)) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			return ret;
 		}
 		event = group->overflow_event;
@@ -116,7 +116,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 	if (!list_empty(list) && merge) {
 		ret = merge(list, event);
 		if (ret) {
-			mutex_unlock(&group->notification_mutex);
+			spin_unlock(&group->notification_lock);
 			return ret;
 		}
 	}
@@ -124,7 +124,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
 queue:
 	group->q_len++;
 	list_add_tail(&event->list, list);
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 
 	wake_up(&group->notification_waitq);
 	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
@@ -139,7 +139,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
 
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(!spin_is_locked(&group->notification_lock));
 
 	pr_debug("%s: group=%p\n", __func__, group);
 
@@ -161,7 +161,7 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
  */
 struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
 {
-	BUG_ON(!mutex_is_locked(&group->notification_mutex));
+	BUG_ON(!spin_is_locked(&group->notification_lock));
 
 	return list_first_entry(&group->notification_list,
 				struct fsnotify_event, list);
@@ -175,14 +175,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
 
-	mutex_lock(&group->notification_mutex);
+	spin_lock(&group->notification_lock);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		event = fsnotify_remove_first_event(group);
-		mutex_unlock(&group->notification_mutex);
+		spin_unlock(&group->notification_lock);
 		fsnotify_destroy_event(group, event);
-		mutex_lock(&group->notification_mutex);
+		spin_lock(&group->notification_lock);
 	}
-	mutex_unlock(&group->notification_mutex);
+	spin_unlock(&group->notification_lock);
 }
 
 /*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7268ed076be8..0713e873b1c9 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -135,7 +135,7 @@ struct fsnotify_group {
 	const struct fsnotify_ops *ops;	/* how this group handles things */
 
 	/* needed to send notification to userspace */
-	struct mutex notification_mutex;	/* protect the notification_list */
+	spinlock_t notification_lock;		/* protect the notification_list */
 	struct list_head notification_list;	/* list of event_holder this group needs to send to userspace */
 	wait_queue_head_t notification_waitq;	/* read() on the notification file blocks on this waitq */
 	unsigned int q_len;			/* events on the queue */
-- 
2.6.6


