linux-kernel.vger.kernel.org archive mirror
* [PATCH] input: evdev: Use multi-reader buffer to save space (rev5)
@ 2010-06-20 18:48 Henrik Rydberg
  2010-06-23  6:19 ` Dmitry Torokhov
  0 siblings, 1 reply; 5+ messages in thread
From: Henrik Rydberg @ 2010-06-20 18:48 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, linux-kernel, Jiri Kosina, Mika Kuoppala,
	Benjamin Tissoires, Rafi Rubin, Henrik Rydberg

Preparing for larger buffer needs, convert the current per-client
circular buffer to a single buffer with multiple clients. Ideally, there
should be a mechanism where clients wait only during actual buffer
collisions. Until then, let clients take dev->event_lock, which is
already held during buffer writes.

Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
---
Hi Dmitry,

This is revision 5 of the evdev multi-reader buffer patch, fixing the
grab/ungrab bug mentioned in an earlier post.

Thanks,
Henrik
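
For reference, a rough sketch of the resulting scheme (illustration only;
the authoritative change is the diff below): the device keeps one shared
ring and a single write head, and every client keeps its own head/tail
cursors into that ring, all serialized by dev->event_lock.

	/* writer, called with dev->event_lock already held */
	evdev->buffer[evdev->head] = event;
	evdev->head = (evdev->head + 1) & (EVDEV_BUFFER_SIZE - 1);

	/* reader, taking dev->event_lock itself */
	if (client->head != client->tail) {
		*event = evdev->buffer[client->tail++];
		client->tail &= EVDEV_BUFFER_SIZE - 1;
	}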

 drivers/input/evdev.c |   65 ++++++++++++++++++++++++++++++++++---------------
 1 files changed, 45 insertions(+), 20 deletions(-)

diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2ee6c7a..da0fe3f 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -33,13 +33,13 @@ struct evdev {
 	spinlock_t client_lock; /* protects client_list */
 	struct mutex mutex;
 	struct device dev;
+	int head;
+	struct input_event buffer[EVDEV_BUFFER_SIZE];
 };
 
 struct evdev_client {
-	struct input_event buffer[EVDEV_BUFFER_SIZE];
 	int head;
 	int tail;
-	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
 	struct fasync_struct *fasync;
 	struct evdev *evdev;
 	struct list_head node;
@@ -48,18 +48,13 @@ struct evdev_client {
 static struct evdev *evdev_table[EVDEV_MINORS];
 static DEFINE_MUTEX(evdev_table_mutex);
 
-static void evdev_pass_event(struct evdev_client *client,
-			     struct input_event *event)
+static inline void evdev_sync_event(struct evdev_client *client,
+				    struct evdev *evdev, int type)
 {
-	/*
-	 * Interrupts are disabled, just acquire the lock
-	 */
-	spin_lock(&client->buffer_lock);
-	client->buffer[client->head++] = *event;
-	client->head &= EVDEV_BUFFER_SIZE - 1;
-	spin_unlock(&client->buffer_lock);
-
-	if (event->type == EV_SYN)
+	/* sync the reader such that it never becomes empty */
+	if (client->tail != evdev->head)
+		client->head = evdev->head;
+	if (type == EV_SYN)
 		kill_fasync(&client->fasync, SIGIO, POLL_IN);
 }
 
@@ -78,14 +73,18 @@ static void evdev_event(struct input_handle *handle,
 	event.code = code;
 	event.value = value;
 
+	/* dev->event_lock held */
+	evdev->buffer[evdev->head] = event;
+	evdev->head = (evdev->head + 1) & (EVDEV_BUFFER_SIZE - 1);
+
 	rcu_read_lock();
 
 	client = rcu_dereference(evdev->grab);
 	if (client)
-		evdev_pass_event(client, &event);
+		evdev_sync_event(client, evdev, type);
 	else
 		list_for_each_entry_rcu(client, &evdev->client_list, node)
-			evdev_pass_event(client, &event);
+			evdev_sync_event(client, evdev, type);
 
 	rcu_read_unlock();
 
@@ -149,11 +148,29 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
 
 static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
 {
+	struct input_dev *dev = evdev->handle.dev;
+	int head, tail;
+
 	if (evdev->grab != client)
 		return  -EINVAL;
 
+	spin_lock_irq(&dev->event_lock);
+
+	head = client->head;
+	tail = client->tail;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(client, &evdev->client_list, node) {
+		client->head = head;
+		client->tail = tail;
+	}
+	rcu_read_unlock();
+
 	rcu_assign_pointer(evdev->grab, NULL);
 	synchronize_rcu();
+
+	spin_unlock_irq(&dev->event_lock);
+
 	input_release_device(&evdev->handle);
 
 	return 0;
@@ -162,6 +179,7 @@ static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
 static void evdev_attach_client(struct evdev *evdev,
 				struct evdev_client *client)
 {
+	client->head = client->tail = evdev->head;
 	spin_lock(&evdev->client_lock);
 	list_add_tail_rcu(&client->node, &evdev->client_list);
 	spin_unlock(&evdev->client_lock);
@@ -269,7 +287,6 @@ static int evdev_open(struct inode *inode, struct file *file)
 		goto err_put_evdev;
 	}
 
-	spin_lock_init(&client->buffer_lock);
 	client->evdev = evdev;
 	evdev_attach_client(evdev, client);
 
@@ -325,19 +342,27 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
 }
 
 static int evdev_fetch_next_event(struct evdev_client *client,
+				  struct evdev *evdev,
 				  struct input_event *event)
 {
+	struct input_dev *dev = evdev->handle.dev;
 	int have_event;
 
-	spin_lock_irq(&client->buffer_lock);
+	/*
+	 * FIXME: taking event_lock protects against reentrant fops
+	 * reads and provides sufficient buffer locking. However,
+	 * clients should not block writes, and having multiple clients
+	 * waiting for each other is suboptimal.
+	 */
+	spin_lock_irq(&dev->event_lock);
 
 	have_event = client->head != client->tail;
 	if (have_event) {
-		*event = client->buffer[client->tail++];
+		*event = evdev->buffer[client->tail++];
 		client->tail &= EVDEV_BUFFER_SIZE - 1;
 	}
 
-	spin_unlock_irq(&client->buffer_lock);
+	spin_unlock_irq(&dev->event_lock);
 
 	return have_event;
 }
@@ -366,7 +391,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
 		return -ENODEV;
 
 	while (retval + input_event_size() <= count &&
-	       evdev_fetch_next_event(client, &event)) {
+	       evdev_fetch_next_event(client, evdev, &event)) {
 
 		if (input_event_to_user(buffer + retval, &event))
 			return -EFAULT;
-- 
1.6.3.3



* Re: [PATCH] input: evdev: Use multi-reader buffer to save space (rev5)
  2010-06-20 18:48 [PATCH] input: evdev: Use multi-reader buffer to save space (rev5) Henrik Rydberg
@ 2010-06-23  6:19 ` Dmitry Torokhov
  2010-06-23  8:11   ` Henrik Rydberg
  0 siblings, 1 reply; 5+ messages in thread
From: Dmitry Torokhov @ 2010-06-23  6:19 UTC (permalink / raw)
  To: Henrik Rydberg
  Cc: linux-input, linux-kernel, Jiri Kosina, Mika Kuoppala,
	Benjamin Tissoires, Rafi Rubin

Hi Henrik,

On Sun, Jun 20, 2010 at 08:48:54PM +0200, Henrik Rydberg wrote:
> @@ -149,11 +148,29 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
>  
>  static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
>  {
> +	struct input_dev *dev = evdev->handle.dev;
> +	int head, tail;
> +
>  	if (evdev->grab != client)
>  		return  -EINVAL;
>  
> +	spin_lock_irq(&dev->event_lock);
> +
> +	head = client->head;
> +	tail = client->tail;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(client, &evdev->client_list, node) {
> +		client->head = head;
> +		client->tail = tail;
> +	}
> +	rcu_read_unlock();
> +
>  	rcu_assign_pointer(evdev->grab, NULL);
>  	synchronize_rcu();

You may not call synchronize_rcu() while holding a spinlock and with
interrupts off.
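
(Illustration only, untested: the grace period would have to move outside
the locked region, for example by ending the function with

	rcu_assign_pointer(evdev->grab, NULL);
	spin_unlock_irq(&dev->event_lock);

	/* synchronize_rcu() may sleep, so run it with the lock dropped
	 * and interrupts enabled */
	synchronize_rcu();

	input_release_device(&evdev->handle);

while the head/tail copying stays under the lock.)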

> +
> +	spin_unlock_irq(&dev->event_lock);
> +
>  	input_release_device(&evdev->handle);
>  
>  	return 0;
> @@ -162,6 +179,7 @@ static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
>  static void evdev_attach_client(struct evdev *evdev,
>  				struct evdev_client *client)
>  {
> +	client->head = client->tail = evdev->head;
>  	spin_lock(&evdev->client_lock);

So what happens if evdev->head advances here, before we have added the
new client to the list? I guess we'll have to wait until the next event,
and then the client will read both of them...
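
(Spelling out that window, illustration only:

    evdev_attach_client()                  evdev_event() [event_lock held]
    ---------------------                  -------------------------------
    client->head = client->tail
                 = evdev->head;
                                           evdev->buffer[evdev->head] = ev1;
                                           evdev->head advances; the new
                                           client is not on the list yet
    list_add_tail_rcu(&client->node,
                      &evdev->client_list);
                                           ev2 arrives; evdev_sync_event()
                                           now moves client->head past
                                           both ev1 and ev2
)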

Overall I am starting to get concerned about proper isolation between
clients. Right now, if one client stops reading events and another one
issues a grab, then the first client will only get the events that were
accumulated before the grab took place. With the new shared buffer, the
first client may get "grabbed" events if it stops for long enough for
the buffer to wrap around.
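
(To illustrate with, say, a 64-entry shared ring: the idle client sits at
tail = 10, head = 12; during the grab only the grabbing client's head is
advanced, but the writer keeps filling slots, and once evdev->head wraps
past index 10 those slots hold grabbed events, which the idle client will
then return on its next read.)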

Do we really save that much memory here? We normally do not have that
many users connected to event devices at once...

Thanks.

-- 
Dmitry


* Re: [PATCH] input: evdev: Use multi-reader buffer to save space (rev5)
  2010-06-23  6:19 ` Dmitry Torokhov
@ 2010-06-23  8:11   ` Henrik Rydberg
  2010-06-25  8:14     ` Dmitry Torokhov
  0 siblings, 1 reply; 5+ messages in thread
From: Henrik Rydberg @ 2010-06-23  8:11 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, linux-kernel, Jiri Kosina, Mika Kuoppala,
	Benjamin Tissoires, Rafi Rubin

Dmitry Torokhov wrote:
> Hi Henrik,
> 
> On Sun, Jun 20, 2010 at 08:48:54PM +0200, Henrik Rydberg wrote:
>> @@ -149,11 +148,29 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
>>  
>>  static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
>>  {
>> +	struct input_dev *dev = evdev->handle.dev;
>> +	int head, tail;
>> +
>>  	if (evdev->grab != client)
>>  		return  -EINVAL;
>>  
>> +	spin_lock_irq(&dev->event_lock);
>> +
>> +	head = client->head;
>> +	tail = client->tail;
>> +
>> +	rcu_read_lock();
>> +	list_for_each_entry_rcu(client, &evdev->client_list, node) {
>> +		client->head = head;
>> +		client->tail = tail;
>> +	}
>> +	rcu_read_unlock();
>> +
>>  	rcu_assign_pointer(evdev->grab, NULL);
>>  	synchronize_rcu();
> 
> You may not call synchronize_rcu() while holding a spinlock and with
> interrupts off.

Thanks.

>> +
>> +	spin_unlock_irq(&dev->event_lock);
>> +
>>  	input_release_device(&evdev->handle);
>>  
>>  	return 0;
>> @@ -162,6 +179,7 @@ static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
>>  static void evdev_attach_client(struct evdev *evdev,
>>  				struct evdev_client *client)
>>  {
>> +	client->head = client->tail = evdev->head;
>>  	spin_lock(&evdev->client_lock);
> 
> So what happens if evdev->head advances here, before we have added the
> new client to the list? I guess we'll have to wait until the next event,
> and then the client will read both of them...

Right...

> Overall I am starting to get concerned about proper isolation between
> clients. Right now, if one client stops reading events and another one
> issues a grab, then the first client will only get the events that were
> accumulated before the grab took place. With the new shared buffer, the
> first client may get "grabbed" events if it stops for long enough for
> the buffer to wrap around.

Doing some research, the semantics of the grab ioctl have obviously been discussed
before, and I believe this points to another such issue. When grabbing a device,
are we guaranteeing that the device no longer sends events to other clients, or
are we guaranteeing that other clients can no longer read the device? If the
latter, clearing all client buffers in conjunction with a grab would be
appropriate, and would solve this issue.
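
(Illustration only, untested, reusing the rev5 structures; the flush on
grab could look roughly like the copy loop that ungrab already does:

	struct input_dev *dev = evdev->handle.dev;
	struct evdev_client *c;

	spin_lock_irq(&dev->event_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(c, &evdev->client_list, node)
		c->head = c->tail = evdev->head;
	rcu_read_unlock();
	spin_unlock_irq(&dev->event_lock);

so that every reader starts out empty at the moment the grab takes effect.)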

> Do we really save that much memory here? We normally do not have that
> many users connected to event devices at once...

Ok, let's scratch this. Although I think the idea of multi-reader buffers is
sound, it is obviously sufficiently incompatible with the current approach to
produce distastefully complex patches. I will return with a new set which only
fixes the buffer resize problem, and leaves the rest for later.

Thanks,
Henrik



* Re: [PATCH] input: evdev: Use multi-reader buffer to save space (rev5)
  2010-06-23  8:11   ` Henrik Rydberg
@ 2010-06-25  8:14     ` Dmitry Torokhov
  2010-06-25 13:19       ` Henrik Rydberg
  0 siblings, 1 reply; 5+ messages in thread
From: Dmitry Torokhov @ 2010-06-25  8:14 UTC (permalink / raw)
  To: Henrik Rydberg
  Cc: linux-input, linux-kernel, Jiri Kosina, Mika Kuoppala,
	Benjamin Tissoires, Rafi Rubin

On Wed, Jun 23, 2010 at 10:11:47AM +0200, Henrik Rydberg wrote:
> Dmitry Torokhov wrote:
> 
> > Overall I am starting to get concerned about proper isolation between
> > clients. Right now, if one client stops reading events and another one
> > issues a grab, then the first client will only get the events that were
> > accumulated before the grab took place. With the new shared buffer, the
> > first client may get "grabbed" events if it stops for long enough for
> > the buffer to wrap around.
> 
> Doing some research, the semantics of the grab ioctl have obviously been discussed
> before, and I believe this points to another such issue. When grabbing a device,
> are we guaranteeing that the device no longer sends events to other clients, or
> are we guaranteeing that other clients can no longer read the device? If the
> latter, clearing all client buffers in conjunction with a grab would be
> appropriate, and would solve this issue.


Yes, I think it would be an acceptable approach.

> 
> > Do we really save that much memory here? We normally do not have that
> > many users connected to event devices at once...
> 
> Ok, let's scratch this. Although I think the idea of multi-reader buffers is
> sound, it is obviously sufficiently incompatible with the current approach to
> produce distastefully complex patches. I will return with a new set which only
> fixes the buffer resize problem, and leaves the rest for later.
> 

Right, let's merge this and also MT slots and revisit this issue at some
later point.

Thanks.

-- 
Dmitry


* Re: [PATCH] input: evdev: Use multi-reader buffer to save space (rev5)
  2010-06-25  8:14     ` Dmitry Torokhov
@ 2010-06-25 13:19       ` Henrik Rydberg
  0 siblings, 0 replies; 5+ messages in thread
From: Henrik Rydberg @ 2010-06-25 13:19 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, linux-kernel, Jiri Kosina, Mika Kuoppala,
	Benjamin Tissoires, Rafi Rubin

Dmitry Torokhov wrote:
> On Wed, Jun 23, 2010 at 10:11:47AM +0200, Henrik Rydberg wrote:
>> Dmitry Torokhov wrote:
>>
>>> Overall I am starting to get concerned about proper isolation between
>>> clients. Right now, if one client stops reading events and another one
>>> issues a grab, then the first client will only get the events that were
>>> accumulated before the grab took place. With the new shared buffer, the
>>> first client may get "grabbed" events if it stops for long enough for
>>> the buffer to wrap around.
>> Doing some research, the semantics of the grab ioctl have obviously been discussed
>> before, and I believe this points to another such issue. When grabbing a device,
>> are we guaranteeing that the device no longer sends events to other clients, or
>> are we guaranteeing that other clients can no longer read the device? If the
>> latter, clearing all client buffers in conjunction with a grab would be
>> appropriate, and would solve this issue.
> 
> 
> Yes, I think it would be an acceptable approach.
> 
>>> Do we really save that much memory here? We normally do not have that
>>> many users connected to event devices at once...
>> Ok, let's scratch this. Although I think the idea of multi-reader buffers is
>> sound, it is obviously sufficiently incompatible with the current approach to
>> produce distastefully complex patches. I will return with a new set which only
>> fixes the buffer resize problem, and leaves the rest for later.
>>
> 
> Right, let's merge this and also MT slots and revisit this issue at some
> later point.

Sounds good. I just resent the main MT patches, adding some more Cc:s and
making sure we both have the same version. :-) Regarding the ioctl stuff for
MT slots, I did not send those again; I am not sure what to do with them.

Thanks,
Henrik



