* [PATCH] iio:event: Fix and cleanup locking
@ 2014-02-14 18:49 Lars-Peter Clausen
  2014-02-18 10:54 ` Jonathan Cameron
  0 siblings, 1 reply; 3+ messages in thread
From: Lars-Peter Clausen @ 2014-02-14 18:49 UTC (permalink / raw)
  To: Jonathan Cameron; +Cc: linux-iio, Lars-Peter Clausen

The event code currently holds a spinlock with IRQs disabled while calling
kfifo_to_user(). kfifo_to_user() can generate a page fault though, and handling
the fault requires being able to sleep, which is not possible while interrupts
are disabled. The good thing is that kfifo handles concurrent read and write
access just fine as long as there is only one reader and one writer, so no
locking is needed to protect the reader against the writer. It is possible
though that userspace tries to read from the event FIFO from multiple threads
concurrently, so we do need locking to protect the readers against each other.
This is done using a mutex. The mutex only protects the kfifo_to_user() call;
it does not protect the waitqueue. This means that multiple threads can be
waiting for new data, and once a new event is added to the FIFO all waiting
threads will be woken up. If one of those threads is unable to read any data
(because another thread already read it all) it will go back to sleep. The only
remaining issue is that, now that clearing the BUSY flag and emptying the FIFO
no longer happen in one atomic step, an event can be added to the FIFO after it
has been emptied, and this stale event would be visible the next time a new
event file descriptor is created. To avoid this, move the emptying of the FIFO
from iio_event_chrdev_release() to iio_event_getfd().

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
---
 drivers/iio/industrialio-event.c | 83 ++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 42 deletions(-)

diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index c9c1419..0719443 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -40,6 +40,7 @@ struct iio_event_interface {
 	struct list_head	dev_attr_list;
 	unsigned long		flags;
 	struct attribute_group	group;
+	struct mutex		read_lock;
 };
 
 /**
@@ -47,16 +48,17 @@ struct iio_event_interface {
  * @indio_dev:		IIO device structure
  * @ev_code:		What event
  * @timestamp:		When the event occurred
+ *
+ * Note: The caller must make sure that this function is not called
+ * concurrently for the same indio_dev.
  **/
 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 {
 	struct iio_event_interface *ev_int = indio_dev->event_interface;
 	struct iio_event_data ev;
-	unsigned long flags;
 	int copied;
 
 	/* Does anyone care? */
-	spin_lock_irqsave(&ev_int->wait.lock, flags);
 	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
 
 		ev.id = ev_code;
@@ -64,9 +66,8 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
 
 		copied = kfifo_put(&ev_int->det_events, ev);
 		if (copied != 0)
-			wake_up_locked_poll(&ev_int->wait, POLLIN);
+			wake_up_poll(&ev_int->wait, POLLIN);
 	}
-	spin_unlock_irqrestore(&ev_int->wait.lock, flags);
 
 	return 0;
 }
@@ -87,10 +88,8 @@ static unsigned int iio_event_poll(struct file *filep,
 
 	poll_wait(filep, &ev_int->wait, wait);
 
-	spin_lock_irq(&ev_int->wait.lock);
 	if (!kfifo_is_empty(&ev_int->det_events))
 		events = POLLIN | POLLRDNORM;
-	spin_unlock_irq(&ev_int->wait.lock);
 
 	return events;
 }
@@ -111,31 +110,40 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
 	if (count < sizeof(struct iio_event_data))
 		return -EINVAL;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	if (kfifo_is_empty(&ev_int->det_events)) {
-		if (filep->f_flags & O_NONBLOCK) {
-			ret = -EAGAIN;
-			goto error_unlock;
-		}
-		/* Blocking on device; waiting for something to be there */
-		ret = wait_event_interruptible_locked_irq(ev_int->wait,
+	do {
+		if (kfifo_is_empty(&ev_int->det_events)) {
+			if (filep->f_flags & O_NONBLOCK)
+				return -EAGAIN;
+
+			ret = wait_event_interruptible(ev_int->wait,
 					!kfifo_is_empty(&ev_int->det_events) ||
 					indio_dev->info == NULL);
-		if (ret)
-			goto error_unlock;
-		if (indio_dev->info == NULL) {
-			ret = -ENODEV;
-			goto error_unlock;
+			if (ret)
+				return ret;
+			if (indio_dev->info == NULL)
+				return -ENODEV;
 		}
-		/* Single access device so no one else can get the data */
-	}
 
-	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+		if (mutex_lock_interruptible(&ev_int->read_lock))
+			return -ERESTARTSYS;
+		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
+		mutex_unlock(&ev_int->read_lock);
+
+		if (ret)
+			return ret;
+
+		/*
+		 * If we couldn't read anything from the fifo (a different
+		 * thread might have been faster) we return -EAGAIN if the
+		 * file descriptor is non-blocking, otherwise we go back to
+		 * sleep and wait for more data to arrive.
+		 */
+		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+			return -EAGAIN;
 
-error_unlock:
-	spin_unlock_irq(&ev_int->wait.lock);
+	} while (copied == 0);
 
-	return ret ? ret : copied;
+	return copied;
 }
 
 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
@@ -143,15 +151,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 	struct iio_dev *indio_dev = filep->private_data;
 	struct iio_event_interface *ev_int = indio_dev->event_interface;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-	/*
-	 * In order to maintain a clean state for reopening,
-	 * clear out any awaiting events. The mask will prevent
-	 * any new __iio_push_event calls running.
-	 */
-	kfifo_reset_out(&ev_int->det_events);
-	spin_unlock_irq(&ev_int->wait.lock);
+	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 
 	iio_device_put(indio_dev);
 
@@ -174,22 +174,20 @@ int iio_event_getfd(struct iio_dev *indio_dev)
 	if (ev_int == NULL)
 		return -ENODEV;
 
-	spin_lock_irq(&ev_int->wait.lock);
-	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
-		spin_unlock_irq(&ev_int->wait.lock);
+	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
 		return -EBUSY;
-	}
-	spin_unlock_irq(&ev_int->wait.lock);
+
 	iio_device_get(indio_dev);
 
 	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
 				indio_dev, O_RDONLY | O_CLOEXEC);
 	if (fd < 0) {
-		spin_lock_irq(&ev_int->wait.lock);
-		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
-		spin_unlock_irq(&ev_int->wait.lock);
+		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
 		iio_device_put(indio_dev);
+	} else {
+		kfifo_reset_out(&ev_int->det_events);
 	}
+
 	return fd;
 }
 
@@ -425,6 +423,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 {
 	INIT_KFIFO(ev_int->det_events);
 	init_waitqueue_head(&ev_int->wait);
+	mutex_init(&ev_int->read_lock);
 }
 
 static const char *iio_event_group_name = "events";
-- 
1.8.0


* Re: [PATCH] iio:event: Fix and cleanup locking
  2014-02-14 18:49 [PATCH] iio:event: Fix and cleanup locking Lars-Peter Clausen
@ 2014-02-18 10:54 ` Jonathan Cameron
  2014-02-18 11:37   ` Lars-Peter Clausen
  0 siblings, 1 reply; 3+ messages in thread
From: Jonathan Cameron @ 2014-02-18 10:54 UTC (permalink / raw)
  To: Lars-Peter Clausen; +Cc: linux-iio

On 14/02/14 18:49, Lars-Peter Clausen wrote:
> [...]
The patch looks fine.  I'm a little worried that it's too involved for this
stage in the merge cycle.  So is this an issue observed in the wild (in which
case I probably want to get it upstream asap) or is it just a theoretical
issue at the moment? (i.e. can I leave it for the next merge window?)

Actually, without the spinlock in getfd, is there any path where it could
allocate two anon file descriptors?

Jonathan
> [...]



* Re: [PATCH] iio:event: Fix and cleanup locking
  2014-02-18 10:54 ` Jonathan Cameron
@ 2014-02-18 11:37   ` Lars-Peter Clausen
  0 siblings, 0 replies; 3+ messages in thread
From: Lars-Peter Clausen @ 2014-02-18 11:37 UTC (permalink / raw)
  To: Jonathan Cameron; +Cc: linux-iio

On 02/18/2014 11:54 AM, Jonathan Cameron wrote:
> On 14/02/14 18:49, Lars-Peter Clausen wrote:
>> [...]
> The patch looks fine.  I'm a little worried that it's too involved for this
> stage in the merge cycle.  So is this an issue observed in the wild (in
> which case I probably want to get it upstream asap) or is it just a
> theoretical issue at the moment? (i.e. can I leave it for the next merge
> window?)

I haven't seen it in the wild. I only noticed it when I was working on the 
locking for the kfifo buffer, where the issue actually did happen. The issue 
triggers when the page backing the userspace buffer that the event is copied 
into is not mapped in. That can certainly happen, but given that nobody has 
complained I assume it is rare in practice, so there is probably no need to 
send it through fixes.

>
> Actually, without the spinlock in getfd, is there any path where it could
> allocate two anon file descriptors?

test_and_set_bit() is atomic, so at most one of any number of concurrent 
iio_event_getfd() calls can get past it; the others return -EBUSY.
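
A quick userspace analogue of that gate, with C11 atomics standing in for
the kernel's bitops (the names and setup are made up for illustration):
atomic_fetch_or() returns the old value, just like test_and_set_bit(), so
exactly one of the racing callers observes the bit as clear.

/* cc -pthread -o busybit busybit.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define BUSY_BIT 0x1UL

static atomic_ulong flags;	/* static storage: starts at 0 */

static void *try_getfd(void *arg)
{
	/* Returns the previous value, like test_and_set_bit(). */
	unsigned long old = atomic_fetch_or(&flags, BUSY_BIT);

	if (old & BUSY_BIT)
		printf("thread %ld: -EBUSY\n", (long)arg);
	else
		printf("thread %ld: claimed the event fd\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, try_getfd, (void *)1L);
	pthread_create(&b, NULL, try_getfd, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}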

>
> Jonathan
>> [...]

