From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from bh-25.webhostbox.net ([208.91.199.152]:55590 "EHLO bh-25.webhostbox.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S964901AbcIPPzK (ORCPT ); Fri, 16 Sep 2016 11:55:10 -0400
Date: Fri, 16 Sep 2016 08:55:03 -0700
From: Guenter Roeck 
To: Jan Kara 
Cc: Andrew Morton, linux-fsdevel@vger.kernel.org, Miklos Szeredi, Lino Sanfilippo, Eric Paris
Subject: Re: [PATCH 4/6] fsnotify: Convert notification_mutex to a spinlock
Message-ID: <20160916155503.GA1651@roeck-us.net>
References: <1474031567-1831-1-git-send-email-jack@suse.cz>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
In-Reply-To: <1474031567-1831-1-git-send-email-jack@suse.cz>
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID: 

On Fri, Sep 16, 2016 at 03:12:47PM +0200, Jan Kara wrote:
> notification_mutex is used to protect the list of pending events. As
> such there's no reason to use a sleeping lock for it. Convert it to a
> spinlock.
> 
> Reviewed-by: Lino Sanfilippo 
> Signed-off-by: Jan Kara 

Tested-by: Guenter Roeck 

> ---
>  fs/notify/fanotify/fanotify_user.c | 27 ++++++++++++++-------------
>  fs/notify/group.c                  |  6 +++---
>  fs/notify/inotify/inotify_user.c   | 16 ++++++++--------
>  fs/notify/notification.c           | 27 +++++++++++++++------------
>  include/linux/fsnotify_backend.h   |  2 +-
>  5 files changed, 41 insertions(+), 37 deletions(-)
> 
> This is a fixed version of the patch that fixes the BUG_ON hitting on UP
> kernels.
> 
> diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
> index 46d135c4988f..80091a5dc8c0 100644
> --- a/fs/notify/fanotify/fanotify_user.c
> +++ b/fs/notify/fanotify/fanotify_user.c
> @@ -49,12 +49,13 @@ struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
>   * enough to fit in "count". Return an error pointer if the count
>   * is not large enough.
>   *
> - * Called with the group->notification_mutex held.
> + * Called with the group->notification_lock held.
>   */
>  static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>                                              size_t count)
>  {
> -        BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +        BUG_ON(IS_ENABLED(CONFIG_SMP) &&
> +               !spin_is_locked(&group->notification_lock));
> 
>          pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
> 
> @@ -64,7 +65,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>          if (FAN_EVENT_METADATA_LEN > count)
>                  return ERR_PTR(-EINVAL);
> 
> -        /* held the notification_mutex the whole time, so this is the
> +        /* held the notification_lock the whole time, so this is the
>           * same event we peeked above */
>          return fsnotify_remove_first_event(group);
>  }
> @@ -244,10 +245,10 @@ static unsigned int fanotify_poll(struct file *file, poll_table *wait)
>          int ret = 0;
> 
>          poll_wait(file, &group->notification_waitq, wait);
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
>          if (!fsnotify_notify_queue_is_empty(group))
>                  ret = POLLIN | POLLRDNORM;
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
> 
>          return ret;
>  }
> @@ -268,9 +269,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
> 
>          add_wait_queue(&group->notification_waitq, &wait);
>          while (1) {
> -                mutex_lock(&group->notification_mutex);
> +                spin_lock(&group->notification_lock);
>                  kevent = get_one_event(group, count);
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
> 
>                  if (IS_ERR(kevent)) {
>                          ret = PTR_ERR(kevent);
> @@ -387,17 +388,17 @@ static int fanotify_release(struct inode *ignored, struct file *file)
>           * dequeue them and set the response. They will be freed once the
>           * response is consumed and fanotify_get_response() returns.
>           */
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
>          while (!fsnotify_notify_queue_is_empty(group)) {
>                  fsn_event = fsnotify_remove_first_event(group);
>                  if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
> -                        mutex_unlock(&group->notification_mutex);
> +                        spin_unlock(&group->notification_lock);
>                          fsnotify_destroy_event(group, fsn_event);
> -                        mutex_lock(&group->notification_mutex);
> +                        spin_lock(&group->notification_lock);
>                  } else
>                          FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
>          }
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
> 
>          /* Response for all permission events it set, wakeup waiters */
>          wake_up(&group->fanotify_data.access_waitq);
> @@ -423,10 +424,10 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
> 
>          switch (cmd) {
>          case FIONREAD:
> -                mutex_lock(&group->notification_mutex);
> +                spin_lock(&group->notification_lock);
>                  list_for_each_entry(fsn_event, &group->notification_list, list)
>                          send_len += FAN_EVENT_METADATA_LEN;
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
>                  ret = put_user(send_len, (int __user *) p);
>                  break;
>          }
> diff --git a/fs/notify/group.c b/fs/notify/group.c
> index b47f7cfdcaa4..fbe3cbebec16 100644
> --- a/fs/notify/group.c
> +++ b/fs/notify/group.c
> @@ -45,9 +45,9 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
>   */
>  void fsnotify_group_stop_queueing(struct fsnotify_group *group)
>  {
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
>          group->shutdown = true;
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
>  }
> 
>  /*
> @@ -125,7 +125,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
>          atomic_set(&group->refcnt, 1);
>          atomic_set(&group->num_marks, 0);
> 
> -        mutex_init(&group->notification_mutex);
> +        spin_lock_init(&group->notification_lock);
>          INIT_LIST_HEAD(&group->notification_list);
>          init_waitqueue_head(&group->notification_waitq);
>          group->max_events = UINT_MAX;
> diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
> index b8d08d0d0a4d..69d1ea3d292a 100644
> --- a/fs/notify/inotify/inotify_user.c
> +++ b/fs/notify/inotify/inotify_user.c
> @@ -115,10 +115,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
>          int ret = 0;
> 
>          poll_wait(file, &group->notification_waitq, wait);
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
>          if (!fsnotify_notify_queue_is_empty(group))
>                  ret = POLLIN | POLLRDNORM;
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
> 
>          return ret;
>  }
> @@ -138,7 +138,7 @@ static int round_event_name_len(struct fsnotify_event *fsn_event)
>   * enough to fit in "count". Return an error pointer if
>   * not large enough.
>   *
> - * Called with the group->notification_mutex held.
> + * Called with the group->notification_lock held.
>   */
>  static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>                                              size_t count)
> @@ -157,7 +157,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
>          if (event_size > count)
>                  return ERR_PTR(-EINVAL);
> 
> -        /* held the notification_mutex the whole time, so this is the
> +        /* held the notification_lock the whole time, so this is the
>           * same event we peeked above */
>          fsnotify_remove_first_event(group);
> 
> @@ -234,9 +234,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
> 
>          add_wait_queue(&group->notification_waitq, &wait);
>          while (1) {
> -                mutex_lock(&group->notification_mutex);
> +                spin_lock(&group->notification_lock);
>                  kevent = get_one_event(group, count);
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
> 
>                  pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
> 
> @@ -300,13 +300,13 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
> 
>          switch (cmd) {
>          case FIONREAD:
> -                mutex_lock(&group->notification_mutex);
> +                spin_lock(&group->notification_lock);
>                  list_for_each_entry(fsn_event, &group->notification_list,
>                                      list) {
>                          send_len += sizeof(struct inotify_event);
>                          send_len += round_event_name_len(fsn_event);
>                  }
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
>                  ret = put_user(send_len, (int __user *) p);
>                  break;
>          }
> diff --git a/fs/notify/notification.c b/fs/notify/notification.c
> index 7d563dea52a4..8a7a8cd041e8 100644
> --- a/fs/notify/notification.c
> +++ b/fs/notify/notification.c
> @@ -63,7 +63,8 @@ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
>  /* return true if the notify queue is empty, false otherwise */
>  bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
>  {
> -        BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +        BUG_ON(IS_ENABLED(CONFIG_SMP) &&
> +               !spin_is_locked(&group->notification_lock));
>          return list_empty(&group->notification_list) ? true : false;
>  }
> 
> @@ -95,10 +96,10 @@ int fsnotify_add_event(struct fsnotify_group *group,
> 
>          pr_debug("%s: group=%p event=%p\n", __func__, group, event);
> 
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
> 
>          if (group->shutdown) {
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
>                  return 2;
>          }
> 
> @@ -106,7 +107,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>                  ret = 2;
>                  /* Queue overflow event only if it isn't already queued */
>                  if (!list_empty(&group->overflow_event->list)) {
> -                        mutex_unlock(&group->notification_mutex);
> +                        spin_unlock(&group->notification_lock);
>                          return ret;
>                  }
>                  event = group->overflow_event;
> @@ -116,7 +117,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>          if (!list_empty(list) && merge) {
>                  ret = merge(list, event);
>                  if (ret) {
> -                        mutex_unlock(&group->notification_mutex);
> +                        spin_unlock(&group->notification_lock);
>                          return ret;
>                  }
>          }
> @@ -124,7 +125,7 @@ int fsnotify_add_event(struct fsnotify_group *group,
>  queue:
>          group->q_len++;
>          list_add_tail(&event->list, list);
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
> 
>          wake_up(&group->notification_waitq);
>          kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
> @@ -139,7 +140,8 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
>  {
>          struct fsnotify_event *event;
> 
> -        BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +        BUG_ON(IS_ENABLED(CONFIG_SMP) &&
> +               !spin_is_locked(&group->notification_lock));
> 
>          pr_debug("%s: group=%p\n", __func__, group);
> 
> @@ -161,7 +163,8 @@ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
>   */
>  struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
>  {
> -        BUG_ON(!mutex_is_locked(&group->notification_mutex));
> +        BUG_ON(IS_ENABLED(CONFIG_SMP) &&
> +               !spin_is_locked(&group->notification_lock));
> 
>          return list_first_entry(&group->notification_list,
>                                  struct fsnotify_event, list);
> @@ -175,14 +178,14 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
>  {
>          struct fsnotify_event *event;
> 
> -        mutex_lock(&group->notification_mutex);
> +        spin_lock(&group->notification_lock);
>          while (!fsnotify_notify_queue_is_empty(group)) {
>                  event = fsnotify_remove_first_event(group);
> -                mutex_unlock(&group->notification_mutex);
> +                spin_unlock(&group->notification_lock);
>                  fsnotify_destroy_event(group, event);
> -                mutex_lock(&group->notification_mutex);
> +                spin_lock(&group->notification_lock);
>          }
> -        mutex_unlock(&group->notification_mutex);
> +        spin_unlock(&group->notification_lock);
>  }
> 
>  /*
> diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
> index 7268ed076be8..0713e873b1c9 100644
> --- a/include/linux/fsnotify_backend.h
> +++ b/include/linux/fsnotify_backend.h
> @@ -135,7 +135,7 @@ struct fsnotify_group {
>          const struct fsnotify_ops *ops;         /* how this group handles things */
> 
>          /* needed to send notification to userspace */
> -        struct mutex notification_mutex;        /* protect the notification_list */
> +        spinlock_t notification_lock;           /* protect the notification_list */
>          struct list_head notification_list;     /* list of event_holder this group needs to send to userspace */
>          wait_queue_head_t notification_waitq;   /* read() on the notification file blocks on this waitq */
>          unsigned int q_len;                     /* events on the queue */
> -- 
> 2.6.6
> 
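
For context on the UP-kernel fix mentioned in the cover note: on uniprocessor
(!CONFIG_SMP) kernels without spinlock debugging, spin_is_locked() always
returns 0, so an unguarded BUG_ON(!spin_is_locked(...)) fires even while the
lock is held, which is why the assertion is gated on IS_ENABLED(CONFIG_SMP)
in this version of the patch. A minimal sketch of the resulting pattern is
below; the event_queue struct and function names are illustrative only and
are not part of the patch.

#include <linux/bug.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct fsnotify_group, for illustration only. */
struct event_queue {
        spinlock_t lock;                /* plays the role of notification_lock */
        struct list_head pending;       /* list of queued events */
};

static void event_queue_init(struct event_queue *q)
{
        spin_lock_init(&q->lock);       /* mirrors the change in fsnotify_alloc_group() */
        INIT_LIST_HEAD(&q->pending);
}

static bool event_queue_is_empty(struct event_queue *q)
{
        /*
         * Assert that the caller holds q->lock, but only on SMP builds:
         * on UP kernels spin_is_locked() is always false, which is what
         * made the earlier, unguarded BUG_ON trigger.
         */
        BUG_ON(IS_ENABLED(CONFIG_SMP) && !spin_is_locked(&q->lock));
        return list_empty(&q->pending);
}

static void event_queue_add(struct event_queue *q, struct list_head *entry)
{
        /* Short, non-sleeping critical section, so a spinlock suffices. */
        spin_lock(&q->lock);
        list_add_tail(entry, &q->pending);
        spin_unlock(&q->lock);
}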