* [PATCH 0/3] input: evdev: Dynamic buffers
@ 2010-05-28 15:10 Henrik Rydberg
  2010-05-28 15:10 ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Henrik Rydberg
  0 siblings, 1 reply; 6+ messages in thread
From: Henrik Rydberg @ 2010-05-28 15:10 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires,
	Rafi Rubin, Henrik Rydberg

Hi Dmitry,

Here is a first stab at the problem of buffer overruns in evdev when
running MT devices.

The first patch converts the current per-client evdev fifos to a
single multi-reader buffer, to save memory and improve resilience
when there are many listeners. Locking is used sparingly in favor of
atomic operations, and a memory barrier may still be missing. The
second patch converts the (single) static buffer to a dynamically
allocated one, leaving the current buffer size intact. The third patch
introduces the events_per_packet interface in input_dev and uses that
information to compute the evdev buffer size.
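
As a rough illustration of the first point (not part of the patches;
the names, the toy int payload and the absence of locking are purely
for the sketch), the single-writer, multi-reader ring boils down to
something like this user-space model:

#include <stdio.h>

#define BUF_SIZE 8			/* power of two, like EVDEV_BUFFER_SIZE */

struct ring {
	int head;			/* advanced only by the single writer */
	int buffer[BUF_SIZE];		/* storage shared by all readers */
};

struct reader {
	int tail;			/* private read position per client */
};

static void ring_write(struct ring *r, int value)
{
	r->buffer[r->head] = value;
	r->head = (r->head + 1) & (BUF_SIZE - 1);
}

static int reader_fetch(const struct ring *r, struct reader *rd, int *value)
{
	if (rd->tail == r->head)
		return 0;		/* nothing new for this reader */
	*value = r->buffer[rd->tail];
	rd->tail = (rd->tail + 1) & (BUF_SIZE - 1);
	return 1;
}

int main(void)
{
	struct ring r = { 0 };
	struct reader a = { 0 }, b = { 0 };
	int i, v;

	for (i = 0; i < 5; i++)
		ring_write(&r, i);

	while (reader_fetch(&r, &a, &v))
		printf("reader a: %d\n", v);
	while (reader_fetch(&r, &b, &v))
		printf("reader b: %d\n", v);	/* b sees the same events */

	return 0;
}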

The patches are lightly tested, anticipating some discussion.

Cheers,
Henrik

---

Henrik Rydberg (3):
  input: evdev: use multi-reader buffer to save space
  input: evdev: convert to dynamic event buffer
  input: use driver hint to compute the evdev buffer size

 drivers/input/evdev.c |   82 ++++++++++++++++++++++++++++++------------------
 include/linux/input.h |    7 ++++
 2 files changed, 58 insertions(+), 31 deletions(-)


* [PATCH 1/3] input: evdev: use multi-reader buffer to save space
  2010-05-28 15:10 [PATCH 0/3] input: evdev: Dynamic buffers Henrik Rydberg
@ 2010-05-28 15:10 ` Henrik Rydberg
  2010-05-28 15:10   ` [PATCH 2/3] input: evdev: convert to dynamic event buffer Henrik Rydberg
  2010-05-28 18:10   ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Dmitry Torokhov
  0 siblings, 2 replies; 6+ messages in thread
From: Henrik Rydberg @ 2010-05-28 15:10 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires,
	Rafi Rubin, Henrik Rydberg

In preparation for larger buffer needs, convert the current per-client
circular buffer to a single buffer with multiple clients. Since there
is only one writer but potentially many readers, use the somewhat
discouraged rwlock_t pattern.

Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
---
 drivers/input/evdev.c |   60 +++++++++++++++++++++++++-----------------------
 1 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2ee6c7a..8edea95 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -33,13 +33,14 @@ struct evdev {
 	spinlock_t client_lock; /* protects client_list */
 	struct mutex mutex;
 	struct device dev;
+	int head;
+	struct input_event buffer[EVDEV_BUFFER_SIZE];
+	rwlock_t buffer_lock; /* protects access to buffer only */
 };
 
 struct evdev_client {
-	struct input_event buffer[EVDEV_BUFFER_SIZE];
-	int head;
+	atomic_t head;
 	int tail;
-	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
 	struct fasync_struct *fasync;
 	struct evdev *evdev;
 	struct list_head node;
@@ -48,18 +49,11 @@ struct evdev_client {
 static struct evdev *evdev_table[EVDEV_MINORS];
 static DEFINE_MUTEX(evdev_table_mutex);
 
-static void evdev_pass_event(struct evdev_client *client,
-			     struct input_event *event)
+static inline void evdev_sync_event(struct evdev_client *client,
+				    int head, int type)
 {
-	/*
-	 * Interrupts are disabled, just acquire the lock
-	 */
-	spin_lock(&client->buffer_lock);
-	client->buffer[client->head++] = *event;
-	client->head &= EVDEV_BUFFER_SIZE - 1;
-	spin_unlock(&client->buffer_lock);
-
-	if (event->type == EV_SYN)
+	atomic_set(&client->head, head);
+	if (type == EV_SYN)
 		kill_fasync(&client->fasync, SIGIO, POLL_IN);
 }
 
@@ -78,14 +72,22 @@ static void evdev_event(struct input_handle *handle,
 	event.code = code;
 	event.value = value;
 
+	/* Interrupts are disabled */
+	write_lock(&evdev->buffer_lock);
+	evdev->buffer[evdev->head] = event;
+	write_unlock(&evdev->buffer_lock);
+
+	evdev->head++;
+	evdev->head &= EVDEV_BUFFER_SIZE - 1;
+
 	rcu_read_lock();
 
 	client = rcu_dereference(evdev->grab);
 	if (client)
-		evdev_pass_event(client, &event);
+		evdev_sync_event(client, evdev->head, type);
 	else
 		list_for_each_entry_rcu(client, &evdev->client_list, node)
-			evdev_pass_event(client, &event);
+			evdev_sync_event(client, evdev->head, type);
 
 	rcu_read_unlock();
 
@@ -269,7 +271,6 @@ static int evdev_open(struct inode *inode, struct file *file)
 		goto err_put_evdev;
 	}
 
-	spin_lock_init(&client->buffer_lock);
 	client->evdev = evdev;
 	evdev_attach_client(evdev, client);
 
@@ -324,21 +325,20 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
 	return retval;
 }
 
-static int evdev_fetch_next_event(struct evdev_client *client,
+static int evdev_fetch_next_event(struct evdev *evdev,
+				  struct evdev_client *client,
 				  struct input_event *event)
 {
-	int have_event;
+	int have_event = atomic_read(&client->head) != client->tail;
 
-	spin_lock_irq(&client->buffer_lock);
-
-	have_event = client->head != client->tail;
 	if (have_event) {
-		*event = client->buffer[client->tail++];
+		read_lock_irq(&evdev->buffer_lock);
+		*event = evdev->buffer[client->tail];
+		read_unlock_irq(&evdev->buffer_lock);
+		client->tail++;
 		client->tail &= EVDEV_BUFFER_SIZE - 1;
 	}
 
-	spin_unlock_irq(&client->buffer_lock);
-
 	return have_event;
 }
 
@@ -353,12 +353,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
 	if (count < input_event_size())
 		return -EINVAL;
 
-	if (client->head == client->tail && evdev->exist &&
+	if (atomic_read(&client->head) == client->tail && evdev->exist &&
 	    (file->f_flags & O_NONBLOCK))
 		return -EAGAIN;
 
 	retval = wait_event_interruptible(evdev->wait,
-		client->head != client->tail || !evdev->exist);
+		atomic_read(&client->head) != client->tail || !evdev->exist);
 	if (retval)
 		return retval;
 
@@ -366,7 +366,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
 		return -ENODEV;
 
 	while (retval + input_event_size() <= count &&
-	       evdev_fetch_next_event(client, &event)) {
+	       evdev_fetch_next_event(evdev, client, &event)) {
 
 		if (input_event_to_user(buffer + retval, &event))
 			return -EFAULT;
@@ -384,7 +384,8 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
 	struct evdev *evdev = client->evdev;
 
 	poll_wait(file, &evdev->wait, wait);
-	return ((client->head == client->tail) ? 0 : (POLLIN | POLLRDNORM)) |
+	return ((atomic_read(&client->head) == client->tail) ?
+		0 : (POLLIN | POLLRDNORM)) |
 		(evdev->exist ? 0 : (POLLHUP | POLLERR));
 }
 
@@ -815,6 +816,7 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
 	spin_lock_init(&evdev->client_lock);
 	mutex_init(&evdev->mutex);
 	init_waitqueue_head(&evdev->wait);
+	rwlock_init(&evdev->buffer_lock);
 
 	dev_set_name(&evdev->dev, "event%d", minor);
 	evdev->exist = 1;
-- 
1.6.3.3


* [PATCH 2/3] input: evdev: convert to dynamic event buffer
  2010-05-28 15:10 ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Henrik Rydberg
@ 2010-05-28 15:10   ` Henrik Rydberg
  2010-05-28 15:10     ` [PATCH 3/3] input: use driver hint to compute the evdev buffer size Henrik Rydberg
  2010-05-28 18:10   ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Dmitry Torokhov
  1 sibling, 1 reply; 6+ messages in thread
From: Henrik Rydberg @ 2010-05-28 15:10 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires,
	Rafi Rubin, Henrik Rydberg

Allocate the event buffer dynamically, and prepare to compute the buffer
size in a separate function. This patch keeps the size computation
identical to the current code and introduces no functional changes.

Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
---
 drivers/input/evdev.c |   23 +++++++++++++++++++----
 1 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8edea95..ea652cc 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -10,7 +10,7 @@
 
 #define EVDEV_MINOR_BASE	64
 #define EVDEV_MINORS		32
-#define EVDEV_BUFFER_SIZE	64
+#define EVDEV_MIN_BUFFER_SIZE	64
 
 #include <linux/poll.h>
 #include <linux/sched.h>
@@ -34,7 +34,8 @@ struct evdev {
 	struct mutex mutex;
 	struct device dev;
 	int head;
-	struct input_event buffer[EVDEV_BUFFER_SIZE];
+	int bufsize;
+	struct input_event *buffer;
 	rwlock_t buffer_lock; /* protects access to buffer only */
 };
 
@@ -78,7 +79,7 @@ static void evdev_event(struct input_handle *handle,
 	write_unlock(&evdev->buffer_lock);
 
 	evdev->head++;
-	evdev->head &= EVDEV_BUFFER_SIZE - 1;
+	evdev->head &= evdev->bufsize - 1;
 
 	rcu_read_lock();
 
@@ -125,6 +126,7 @@ static void evdev_free(struct device *dev)
 	struct evdev *evdev = container_of(dev, struct evdev, dev);
 
 	input_put_device(evdev->handle.dev);
+	kfree(evdev->buffer);
 	kfree(evdev);
 }
 
@@ -336,7 +338,7 @@ static int evdev_fetch_next_event(struct evdev *evdev,
 		*event = evdev->buffer[client->tail];
 		read_unlock_irq(&evdev->buffer_lock);
 		client->tail++;
-		client->tail &= EVDEV_BUFFER_SIZE - 1;
+		client->tail &= evdev->bufsize - 1;
 	}
 
 	return have_event;
@@ -788,6 +790,11 @@ static void evdev_cleanup(struct evdev *evdev)
 	}
 }
 
+static int evdev_compute_buffer_size(struct input_dev *dev)
+{
+	return EVDEV_MIN_BUFFER_SIZE;
+}
+
 /*
  * Create new evdev device. Note that input core serializes calls
  * to connect and disconnect so we don't need to lock evdev_table here.
@@ -833,6 +840,14 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
 	evdev->dev.release = evdev_free;
 	device_initialize(&evdev->dev);
 
+	evdev->bufsize = evdev_compute_buffer_size(dev);
+	evdev->buffer = kzalloc(evdev->bufsize * sizeof(struct input_event),
+				GFP_KERNEL);
+	if (!evdev->buffer) {
+		error = -ENOMEM;
+		goto err_free_evdev;
+	}
+
 	error = input_register_handle(&evdev->handle);
 	if (error)
 		goto err_free_evdev;
-- 
1.6.3.3


* [PATCH 3/3] input: use driver hint to compute the evdev buffer size
  2010-05-28 15:10   ` [PATCH 2/3] input: evdev: convert to dynamic event buffer Henrik Rydberg
@ 2010-05-28 15:10     ` Henrik Rydberg
  0 siblings, 0 replies; 6+ messages in thread
From: Henrik Rydberg @ 2010-05-28 15:10 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires,
	Rafi Rubin, Henrik Rydberg

Some devices, in particular MT devices, produce a lot of data.
This leads to a high frequency of lost packets in evdev, which
by default uses a fairly small event buffer. Let the drivers hint
the average number of events per packet for the device by calling
input_set_events_per_packet(), and use that information when
computing the evdev buffer size.

Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
---
 drivers/input/evdev.c |    5 ++++-
 include/linux/input.h |    7 +++++++
 2 files changed, 11 insertions(+), 1 deletions(-)
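
As an illustration (not part of this patch), a hypothetical MT driver
could pass the hint during setup roughly as follows; the figure of 60
events per packet is invented for the example:

static void example_mt_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);

	/* roughly: contacts per packet * events per contact + SYN events */
	input_set_events_per_packet(dev, 60);
}

With EVDEV_BUF_PACKETS at 8, a hint of 60 yields max(60 * 8, 64)
rounded up to a 512-event buffer instead of the default 64.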

diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index ea652cc..03ce9c1 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -11,6 +11,7 @@
 #define EVDEV_MINOR_BASE	64
 #define EVDEV_MINORS		32
 #define EVDEV_MIN_BUFFER_SIZE	64
+#define EVDEV_BUF_PACKETS	8
 
 #include <linux/poll.h>
 #include <linux/sched.h>
@@ -792,7 +793,9 @@ static void evdev_cleanup(struct evdev *evdev)
 
 static int evdev_compute_buffer_size(struct input_dev *dev)
 {
-	return EVDEV_MIN_BUFFER_SIZE;
+	int nev = dev->hint_events_per_packet * EVDEV_BUF_PACKETS;
+	nev = max(nev, EVDEV_MIN_BUFFER_SIZE);
+	return roundup_pow_of_two(nev);
 }
 
 /*
diff --git a/include/linux/input.h b/include/linux/input.h
index bd00786..35b015d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1162,6 +1162,8 @@ struct input_dev {
 	unsigned long ffbit[BITS_TO_LONGS(FF_CNT)];
 	unsigned long swbit[BITS_TO_LONGS(SW_CNT)];
 
+	unsigned int hint_events_per_packet;
+
 	unsigned int keycodemax;
 	unsigned int keycodesize;
 	void *keycode;
@@ -1439,6 +1441,11 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
 
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
 
+static inline void input_set_events_per_packet(struct input_dev *dev, int nev)
+{
+	dev->hint_events_per_packet = nev;
+}
+
 static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat)
 {
 	dev->absmin[axis] = min;
-- 
1.6.3.3


* Re: [PATCH 1/3] input: evdev: use multi-reader buffer to save space
  2010-05-28 15:10 ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Henrik Rydberg
  2010-05-28 15:10   ` [PATCH 2/3] input: evdev: convert to dynamic event buffer Henrik Rydberg
@ 2010-05-28 18:10   ` Dmitry Torokhov
  2010-05-28 18:34     ` Henrik Rydberg
  1 sibling, 1 reply; 6+ messages in thread
From: Dmitry Torokhov @ 2010-05-28 18:10 UTC (permalink / raw)
  To: Henrik Rydberg
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires, Rafi Rubin

Hi Henrik,

On Fri, May 28, 2010 at 05:10:32PM +0200, Henrik Rydberg wrote:
> In preparation for larger buffer needs, convert the current per-client
> circular buffer to a single buffer with multiple clients. Since there
> is only one writer but potentially many readers, use the somewhat
> discouraged rwlock_t pattern.

rwlock is completely unsuitable here:

1. You must absolutely not starve writers since they are quite likely
interrupt handlers. If anything, they should get priority.

2. The amount of time the thing is locked is minuscule; I'd say that
regular spinlocks will still give better performance.
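
For concreteness, the write path with a plain spinlock would look
roughly like the sketch below (assuming buffer_lock is declared as a
spinlock_t and the fields are as in the patch; the function name is
made up, and this is an illustration rather than code from the thread):

static void evdev_store_event(struct evdev *evdev,
			      const struct input_event *event)
{
	/* interrupts are already disabled in the event path */
	spin_lock(&evdev->buffer_lock);
	evdev->buffer[evdev->head++] = *event;
	evdev->head &= EVDEV_BUFFER_SIZE - 1;
	spin_unlock(&evdev->buffer_lock);
}

The reader side would then take the same lock with spin_lock_irq()
around the copy-out, much as the per-client code did before this patch.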

Also, the use of atomic is funky; I do not think it does what you think
it would... Could you explain more about why you are using it?

Thanks.

> 
> Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
> ---
>  drivers/input/evdev.c |   60 +++++++++++++++++++++++++-----------------------
>  1 files changed, 31 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
> index 2ee6c7a..8edea95 100644
> --- a/drivers/input/evdev.c
> +++ b/drivers/input/evdev.c
> @@ -33,13 +33,14 @@ struct evdev {
>  	spinlock_t client_lock; /* protects client_list */
>  	struct mutex mutex;
>  	struct device dev;
> +	int head;
> +	struct input_event buffer[EVDEV_BUFFER_SIZE];
> +	rwlock_t buffer_lock; /* protects access to buffer only */
>  };
>  
>  struct evdev_client {
> -	struct input_event buffer[EVDEV_BUFFER_SIZE];
> -	int head;
> +	atomic_t head;
>  	int tail;
> -	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
>  	struct fasync_struct *fasync;
>  	struct evdev *evdev;
>  	struct list_head node;
> @@ -48,18 +49,11 @@ struct evdev_client {
>  static struct evdev *evdev_table[EVDEV_MINORS];
>  static DEFINE_MUTEX(evdev_table_mutex);
>  
> -static void evdev_pass_event(struct evdev_client *client,
> -			     struct input_event *event)
> +static inline void evdev_sync_event(struct evdev_client *client,
> +				    int head, int type)
>  {
> -	/*
> -	 * Interrupts are disabled, just acquire the lock
> -	 */
> -	spin_lock(&client->buffer_lock);
> -	client->buffer[client->head++] = *event;
> -	client->head &= EVDEV_BUFFER_SIZE - 1;
> -	spin_unlock(&client->buffer_lock);
> -
> -	if (event->type == EV_SYN)
> +	atomic_set(&client->head, head);
> +	if (type == EV_SYN)
>  		kill_fasync(&client->fasync, SIGIO, POLL_IN);
>  }
>  
> @@ -78,14 +72,22 @@ static void evdev_event(struct input_handle *handle,
>  	event.code = code;
>  	event.value = value;
>  
> +	/* Interrupts are disabled */
> +	write_lock(&evdev->buffer_lock);
> +	evdev->buffer[evdev->head] = event;
> +	write_unlock(&evdev->buffer_lock);
> +
> +	evdev->head++;
> +	evdev->head &= EVDEV_BUFFER_SIZE - 1;
> +
>  	rcu_read_lock();
>  
>  	client = rcu_dereference(evdev->grab);
>  	if (client)
> -		evdev_pass_event(client, &event);
> +		evdev_sync_event(client, evdev->head, type);
>  	else
>  		list_for_each_entry_rcu(client, &evdev->client_list, node)
> -			evdev_pass_event(client, &event);
> +			evdev_sync_event(client, evdev->head, type);
>  
>  	rcu_read_unlock();
>  
> @@ -269,7 +271,6 @@ static int evdev_open(struct inode *inode, struct file *file)
>  		goto err_put_evdev;
>  	}
>  
> -	spin_lock_init(&client->buffer_lock);
>  	client->evdev = evdev;
>  	evdev_attach_client(evdev, client);
>  
> @@ -324,21 +325,20 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
>  	return retval;
>  }
>  
> -static int evdev_fetch_next_event(struct evdev_client *client,
> +static int evdev_fetch_next_event(struct evdev *evdev,
> +				  struct evdev_client *client,
>  				  struct input_event *event)
>  {
> -	int have_event;
> +	int have_event = atomic_read(&client->head) != client->tail;
>  
> -	spin_lock_irq(&client->buffer_lock);
> -
> -	have_event = client->head != client->tail;
>  	if (have_event) {
> -		*event = client->buffer[client->tail++];
> +		read_lock_irq(&evdev->buffer_lock);
> +		*event = evdev->buffer[client->tail];
> +		read_unlock_irq(&evdev->buffer_lock);
> +		client->tail++;
>  		client->tail &= EVDEV_BUFFER_SIZE - 1;
>  	}
>  
> -	spin_unlock_irq(&client->buffer_lock);
> -
>  	return have_event;
>  }
>  
> @@ -353,12 +353,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
>  	if (count < input_event_size())
>  		return -EINVAL;
>  
> -	if (client->head == client->tail && evdev->exist &&
> +	if (atomic_read(&client->head) == client->tail && evdev->exist &&
>  	    (file->f_flags & O_NONBLOCK))
>  		return -EAGAIN;
>  
>  	retval = wait_event_interruptible(evdev->wait,
> -		client->head != client->tail || !evdev->exist);
> +		atomic_read(&client->head) != client->tail || !evdev->exist);
>  	if (retval)
>  		return retval;
>  
> @@ -366,7 +366,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
>  		return -ENODEV;
>  
>  	while (retval + input_event_size() <= count &&
> -	       evdev_fetch_next_event(client, &event)) {
> +	       evdev_fetch_next_event(evdev, client, &event)) {
>  
>  		if (input_event_to_user(buffer + retval, &event))
>  			return -EFAULT;
> @@ -384,7 +384,8 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
>  	struct evdev *evdev = client->evdev;
>  
>  	poll_wait(file, &evdev->wait, wait);
> -	return ((client->head == client->tail) ? 0 : (POLLIN | POLLRDNORM)) |
> +	return ((atomic_read(&client->head) == client->tail) ?
> +		0 : (POLLIN | POLLRDNORM)) |
>  		(evdev->exist ? 0 : (POLLHUP | POLLERR));
>  }
>  
> @@ -815,6 +816,7 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
>  	spin_lock_init(&evdev->client_lock);
>  	mutex_init(&evdev->mutex);
>  	init_waitqueue_head(&evdev->wait);
> +	rwlock_init(&evdev->buffer_lock);
>  
>  	dev_set_name(&evdev->dev, "event%d", minor);
>  	evdev->exist = 1;

-- 
Dmitry

* Re: [PATCH 1/3] input: evdev: use multi-reader buffer to save space
  2010-05-28 18:10   ` [PATCH 1/3] input: evdev: use multi-reader buffer to save space Dmitry Torokhov
@ 2010-05-28 18:34     ` Henrik Rydberg
  0 siblings, 0 replies; 6+ messages in thread
From: Henrik Rydberg @ 2010-05-28 18:34 UTC (permalink / raw)
  To: Dmitry Torokhov
  Cc: linux-input, Jiri Kosina, Mika Kuoppala, Benjamin Tissoires, Rafi Rubin

Hi Dmitry,

> On Fri, May 28, 2010 at 05:10:32PM +0200, Henrik Rydberg wrote:
>> In preparation for larger buffer needs, convert the current per-client
>> circular buffer to a single buffer with multiple clients. Since there
>> is only one writer but potentially many readers, use the somewhat
>> discouraged rwlock_t pattern.
> 
> rwlock is completely unsuitable here:
> 
> 1. You must absolutely not starve writers since they are quite likely
> interrupt handlers. If anything, they should get priority.

Thanks, I overlooked the starvation argument here.

> 2. The amount of time the thing is locked is minuscule; I'd say that
> regular spinlocks will still give better performance.

I still have a feeling it should be possible to do this lock-less, but I can see
all docs and your comments pointing towards using the spinlock, so consider it
changed.
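
For what it is worth, a lock-free variant for the single-writer case
might pair a write barrier in the producer with a read barrier in the
consumers, along these lines (a sketch only: the function names are
invented, and it ignores the buffer-overrun handling discussed here):

static void evdev_store_event(struct evdev *evdev,
			      const struct input_event *event)
{
	int head = evdev->head;

	evdev->buffer[head] = *event;
	smp_wmb();	/* publish the event data before moving head */
	evdev->head = (head + 1) & (evdev->bufsize - 1);
}

static int evdev_load_event(struct evdev *evdev,
			    struct evdev_client *client,
			    struct input_event *event)
{
	if (client->tail == evdev->head)
		return 0;
	smp_rmb();	/* pairs with the writer's smp_wmb() */
	*event = evdev->buffer[client->tail];
	client->tail = (client->tail + 1) & (evdev->bufsize - 1);
	return 1;
}

The overrun case would still need some form of synchronization or a
retry scheme, which is what the spinlock avoids.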

> 
> Also, the use of atomic is funky; I do not think it does what you think
> it would... Could you explain more about why you are using it?

The use of atomic here serves a declarative purpose; I imagine the assembly code
is more or less identical to the current evdev code. I personally think it is
good to be able to see which variables are involved in a thread-to-thread
exchange. Removing it is not a problem. Is it just me being an old fart, not
understanding that the atomic int has been assumed for the last half decade, or
why is it considered funky?

Cheers,
Henrik

