* [PATCHv2] rpmsg: compute number of buffers to allocate from vrings
@ 2014-09-16 18:33 ` Suman Anna
  0 siblings, 0 replies; 7+ messages in thread
From: Suman Anna @ 2014-09-16 18:33 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap, Suman Anna

The buffers to be used for communication are allocated during the
rpmsg virtio driver's probe, and the total number of buffers is
currently hard-coded to 512. The vring configuration can vary from
one platform to another or between different remote processors. The
setup of the receive buffers will throw a WARN_ON if the associated
vrings are configured with fewer than 256 buffers (in each direction).
So, adjust this hard-coded value to rely on the number of buffers the
virtqueue vring is set up with, but also limit this to at most 256
buffers in each direction to avoid wacky resource tables consuming an
unreasonable amount of memory.
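
For illustration, the sizing this introduces can be exercised in
isolation with a small sketch like the one below (plain userspace C,
not driver code; the constants mirror the patch, and the vring sizes
are made-up inputs rather than values read from a real virtqueue):

#include <stdio.h>

#define MAX_RPMSG_NUM_BUFS	256	/* maximum buffers per direction */
#define RPMSG_BUF_SIZE		512	/* bytes per buffer */

/* mirrors the clamping done in rpmsg_probe() */
static unsigned int rpmsg_num_bufs(unsigned int vring_size)
{
	if (vring_size < MAX_RPMSG_NUM_BUFS)
		return vring_size * 2;	/* small vring: use what fits */
	return MAX_RPMSG_NUM_BUFS * 2;	/* cap at 256 per direction */
}

int main(void)
{
	unsigned int sizes[] = { 32, 64, 256, 1024 };
	unsigned int i, n;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		n = rpmsg_num_bufs(sizes[i]);
		printf("vring of %4u entries -> %3u buffers, %6u bytes\n",
		       sizes[i], n, n * RPMSG_BUF_SIZE);
	}
	return 0;
}

A vring of 32 entries thus yields 64 buffers (32 KB total), while
anything from 256 entries up stays at the 512-buffer, 256 KB ceiling.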

NOTE: The number of buffers is already assumed to be symmetrical
in each direction, and that logic is unchanged.

Signed-off-by: Suman Anna <s-anna@ti.com>
---
v2 changes:
- add upper limit on buffers and update comments
- revise patch description

 drivers/rpmsg/virtio_rpmsg_bus.c | 46 ++++++++++++++++++++++++++++------------
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4..9ea6c43 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -41,6 +41,7 @@
  * @svq:	tx virtqueue
  * @rbufs:	kernel address of rx buffers
  * @sbufs:	kernel address of tx buffers
+ * @num_bufs:	total number of buffers for rx and tx
  * @last_sbuf:	index of last tx buffer used
  * @bufs_dma:	dma base addr of the buffers
  * @tx_lock:	protects svq, sbufs and sleepers, to allow concurrent senders.
@@ -60,6 +61,7 @@ struct virtproc_info {
 	struct virtio_device *vdev;
 	struct virtqueue *rvq, *svq;
 	void *rbufs, *sbufs;
+	unsigned int num_bufs;
 	int last_sbuf;
 	dma_addr_t bufs_dma;
 	struct mutex tx_lock;
@@ -86,13 +88,14 @@ struct rpmsg_channel_info {
 #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
 
 /*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers is computed from the number of buffers supported by
+ * the virtqueue vring (up to a maximum of 256 buffers in each direction).
  *
  * Each buffer will have 16 bytes for the msg header and 496 bytes for
  * the payload.
  *
- * This will require a total space of 256KB for the buffers.
+ * This will utilize a maximum total space of 256KB for the buffers.
  *
  * We might also want to add support for user-provided buffers in time.
  * This will allow bigger buffer size flexibility, and can also be used
@@ -102,9 +105,8 @@ struct rpmsg_channel_info {
  * can change this without changing anything in the firmware of the remote
  * processor.
  */
-#define RPMSG_NUM_BUFS		(512)
+#define MAX_RPMSG_NUM_BUFS	(256)
 #define RPMSG_BUF_SIZE		(512)
-#define RPMSG_TOTAL_BUF_SPACE	(RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
 
 /*
  * Local addresses are dynamically allocated on-demand.
@@ -579,7 +581,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
 	 * either pick the next unused tx buffer
 	 * (half of our buffers are used for sending messages)
 	 */
-	if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+	if (vrp->last_sbuf < vrp->num_bufs / 2)
 		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
 	/* or recycle a used one */
 	else
@@ -948,6 +950,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	struct virtproc_info *vrp;
 	void *bufs_va;
 	int err = 0, i;
+	size_t total_buf_space;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -968,10 +971,24 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rvq = vqs[0];
 	vrp->svq = vqs[1];
 
+	vrp->num_bufs = MAX_RPMSG_NUM_BUFS * 2;
+	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS) {
+		/*
+		 * Override if the virtqueues are configured with fewer
+		 * buffers. We expect an equal number of buffers in each
+		 * direction as per the current code, so warn if the
+		 * configuration doesn't match.
+		 */
+		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+		WARN_ON(virtqueue_get_vring_size(vrp->svq) !=
+			(vrp->num_bufs / 2));
+	}
+	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
 	/* allocate coherent memory for the buffers */
 	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
-				RPMSG_TOTAL_BUF_SPACE,
-				&vrp->bufs_dma, GFP_KERNEL);
+				     total_buf_space, &vrp->bufs_dma,
+				     GFP_KERNEL);
 	if (!bufs_va) {
 		err = -ENOMEM;
 		goto vqs_del;
@@ -984,10 +1001,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rbufs = bufs_va;
 
 	/* and half is dedicated for TX */
-	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+	vrp->sbufs = bufs_va + total_buf_space / 2;
 
 	/* set up the receive buffers */
-	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+	for (i = 0; i < vrp->num_bufs / 2; i++) {
 		struct scatterlist sg;
 		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
 
@@ -1023,8 +1040,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	return 0;
 
 free_coherent:
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					bufs_va, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  bufs_va, vrp->bufs_dma);
 vqs_del:
 	vdev->config->del_vqs(vrp->vdev);
 free_vrp:
@@ -1042,6 +1059,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
 static void rpmsg_remove(struct virtio_device *vdev)
 {
 	struct virtproc_info *vrp = vdev->priv;
+	size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
 	int ret;
 
 	vdev->config->reset(vdev);
@@ -1057,8 +1075,8 @@ static void rpmsg_remove(struct virtio_device *vdev)
 
 	vdev->config->del_vqs(vrp->vdev);
 
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					vrp->rbufs, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  vrp->rbufs, vrp->bufs_dma);
 
 	kfree(vrp);
 }
-- 
2.0.4


* Re: [PATCHv2] rpmsg: compute number of buffers to allocate from vrings
  2014-09-16 18:33 ` Suman Anna
@ 2014-11-13 17:46   ` Suman Anna
  -1 siblings, 0 replies; 7+ messages in thread
From: Suman Anna @ 2014-11-13 17:46 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap, Dave Gerlach

Hi Ohad,

On 09/16/2014 01:33 PM, Suman Anna wrote:
> The buffers to be used for communication are allocated during the
> rpmsg virtio driver's probe, and the total number of buffers is
> currently hard-coded to 512. The vring configuration can vary from
> one platform to another or between different remote processors. The
> setup of the receive buffers will throw a WARN_ON if the associated
> vrings are configured with fewer than 256 buffers (in each direction).
> So, adjust this hard-coded value to rely on the number of buffers the
> virtqueue vring is set up with, but also limit this to at most 256
> buffers in each direction to avoid wacky resource tables consuming an
> unreasonable amount of memory.
> 
> NOTE: The number of buffers is already assumed to be symmetrical
> in each direction, and that logic is unchanged.
> 
> Signed-off-by: Suman Anna <s-anna@ti.com>
> ---
> v2 changes:
> - add upper limit on buffers and update comments
> - revise patch description

Any comments on this one? If not, can you pick this up for 3.19?

regards
Suman


* Re: [PATCHv2] rpmsg: compute number of buffers to allocate from vrings
  2014-11-13 17:46   ` Suman Anna
@ 2014-11-26 16:52   ` Ohad Ben-Cohen
  2014-11-26 22:30     ` Suman Anna
  -1 siblings, 1 reply; 7+ messages in thread
From: Ohad Ben-Cohen @ 2014-11-26 16:52 UTC (permalink / raw)
  To: Suman Anna; +Cc: Rusty Russell, linux-kernel, linux-omap, Dave Gerlach

Hi Suman,

On Thu, Nov 13, 2014 at 7:46 PM, Suman Anna <s-anna@ti.com> wrote:
> Hi Ohad,
>
> On 09/16/2014 01:33 PM, Suman Anna wrote:
>> The buffers to be used for communication are allocated during the
>> rpmsg virtio driver's probe, and the total number of buffers is
>> currently hard-coded to 512. The vring configuration can vary from
>> one platform to another or between different remote processors. The
>> setup of the receive buffers will throw a WARN_ON if the associated
>> vrings are configured with fewer than 256 buffers (in each direction).
>> So, adjust this hard-coded value to rely on the number of buffers the
>> virtqueue vring is set up with, but also limit this to at most 256
>> buffers in each direction to avoid wacky resource tables consuming an
>> unreasonable amount of memory.
>>
>> NOTE: The number of buffers is already assumed to be symmetrical
>> in each direction, and that logic is unchanged.
>>
>> Signed-off-by: Suman Anna <s-anna@ti.com>
>> ---
>> v2 changes:
>> - add upper limit on buffers and update comments
>> - revise patch description
>
> Any comments on this one, if not can you pick this up for 3.19?

I made some small changes - untested, not even compiled - can you
please make sure it works for you?

Thanks,
Ohad.

[-- Attachment #2: 0001-rpmsg-use-less-buffers-when-vrings-are-small.patch --]
[-- Type: text/x-patch, Size: 5820 bytes --]

From b1b9891441fa33fd0d49b5cb3aa7f04bca1cc1db Mon Sep 17 00:00:00 2001
From: Suman Anna <s-anna@ti.com>
Date: Tue, 16 Sep 2014 13:33:07 -0500
Subject: [PATCH] rpmsg: use less buffers when vrings are small

Adjust the number of rpmsg buffers to rely on the size of the
vring, instead of using the hard-coded value of 512 (256 per
direction).

This is needed when small vrings are being used, where 256
buffers are too many to fit in a vring.

While considering the vring size, keep using the hard-coded value
of 512 as an upper limit, to avoid wacky resource tables consuming
an unreasonable amount of memory.
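
As a worked example (numbers derived from the sizing above, not stated
in the patch itself): with vrings of 64 entries per direction, num_bufs
comes out to 128 and the coherent allocation shrinks to 128 * 512 =
64 KB, while vrings of 256 entries or more keep the previous ceiling of
512 * 512 bytes = 256 KB.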

NOTE: The number of buffers is already assumed to be symmetrical
in each direction, and that logic is unchanged.

Signed-off-by: Suman Anna <s-anna@ti.com>
[edit commit message, small code and comment simplification]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
---
 drivers/rpmsg/virtio_rpmsg_bus.c | 44 +++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4..92f6af6 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -41,6 +41,7 @@
  * @svq:	tx virtqueue
  * @rbufs:	kernel address of rx buffers
  * @sbufs:	kernel address of tx buffers
+ * @num_bufs:	total number of buffers for rx and tx
  * @last_sbuf:	index of last tx buffer used
  * @bufs_dma:	dma base addr of the buffers
  * @tx_lock:	protects svq, sbufs and sleepers, to allow concurrent senders.
@@ -60,6 +61,7 @@ struct virtproc_info {
 	struct virtio_device *vdev;
 	struct virtqueue *rvq, *svq;
 	void *rbufs, *sbufs;
+	unsigned int num_bufs;
 	int last_sbuf;
 	dma_addr_t bufs_dma;
 	struct mutex tx_lock;
@@ -86,13 +88,14 @@ struct rpmsg_channel_info {
 #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
 
 /*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers will be computed from the number of buffers supported
+ * by the vring, up to a maximum of 512 buffers (256 in each direction).
  *
  * Each buffer will have 16 bytes for the msg header and 496 bytes for
  * the payload.
  *
- * This will require a total space of 256KB for the buffers.
+ * This will utilize a maximum total space of 256KB for the buffers.
  *
  * We might also want to add support for user-provided buffers in time.
  * This will allow bigger buffer size flexibility, and can also be used
@@ -102,9 +105,8 @@ struct rpmsg_channel_info {
  * can change this without changing anything in the firmware of the remote
  * processor.
  */
-#define RPMSG_NUM_BUFS		(512)
+#define MAX_RPMSG_NUM_BUFS	(512)
 #define RPMSG_BUF_SIZE		(512)
-#define RPMSG_TOTAL_BUF_SPACE	(RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
 
 /*
  * Local addresses are dynamically allocated on-demand.
@@ -579,7 +581,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
 	 * either pick the next unused tx buffer
 	 * (half of our buffers are used for sending messages)
 	 */
-	if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+	if (vrp->last_sbuf < vrp->num_bufs / 2)
 		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
 	/* or recycle a used one */
 	else
@@ -948,6 +950,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	struct virtproc_info *vrp;
 	void *bufs_va;
 	int err = 0, i;
+	size_t total_buf_space;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -968,10 +971,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rvq = vqs[0];
 	vrp->svq = vqs[1];
 
+	/* we expect symmetric tx/rx vrings */
+	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
+		virtqueue_get_vring_size(vrp->svq));
+
+	/* we need fewer buffers if vrings are small */
+	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
+		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+	else
+		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
+
+	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
 	/* allocate coherent memory for the buffers */
 	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
-				RPMSG_TOTAL_BUF_SPACE,
-				&vrp->bufs_dma, GFP_KERNEL);
+				     total_buf_space, &vrp->bufs_dma,
+				     GFP_KERNEL);
 	if (!bufs_va) {
 		err = -ENOMEM;
 		goto vqs_del;
@@ -984,10 +999,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rbufs = bufs_va;
 
 	/* and half is dedicated for TX */
-	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+	vrp->sbufs = bufs_va + total_buf_space / 2;
 
 	/* set up the receive buffers */
-	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+	for (i = 0; i < vrp->num_bufs / 2; i++) {
 		struct scatterlist sg;
 		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
 
@@ -1023,8 +1038,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	return 0;
 
 free_coherent:
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					bufs_va, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  bufs_va, vrp->bufs_dma);
 vqs_del:
 	vdev->config->del_vqs(vrp->vdev);
 free_vrp:
@@ -1042,6 +1057,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
 static void rpmsg_remove(struct virtio_device *vdev)
 {
 	struct virtproc_info *vrp = vdev->priv;
+	size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
 	int ret;
 
 	vdev->config->reset(vdev);
@@ -1057,8 +1073,8 @@ static void rpmsg_remove(struct virtio_device *vdev)
 
 	vdev->config->del_vqs(vrp->vdev);
 
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					vrp->rbufs, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  vrp->rbufs, vrp->bufs_dma);
 
 	kfree(vrp);
 }
-- 
1.7.10.rc3.1067.gb129051


* Re: [PATCHv2] rpmsg: compute number of buffers to allocate from vrings
  2014-11-26 16:52   ` Ohad Ben-Cohen
@ 2014-11-26 22:30     ` Suman Anna
  2014-11-28 15:18       ` Ohad Ben-Cohen
  0 siblings, 1 reply; 7+ messages in thread
From: Suman Anna @ 2014-11-26 22:30 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap, Dave Gerlach

Hi Ohad,

>
> On Thu, Nov 13, 2014 at 7:46 PM, Suman Anna <s-anna@ti.com> wrote:
>> Hi Ohad,
>>
>> On 09/16/2014 01:33 PM, Suman Anna wrote:
>>> The buffers to be used for communication are allocated during the
>>> rpmsg virtio driver's probe, and the total number of buffers is
>>> currently hard-coded to 512. The vring configuration can vary from
>>> one platform to another or between different remote processors. The
>>> setup of the receive buffers will throw a WARN_ON if the associated
>>> vrings are configured with fewer than 256 buffers (in each direction).
>>> So, adjust this hard-coded value to rely on the number of buffers the
>>> virtqueue vring is set up with, but also limit this to at most 256
>>> buffers in each direction to avoid wacky resource tables consuming an
>>> unreasonable amount of memory.
>>>
>>> NOTE: The number of buffers is already assumed to be symmetrical
>>> in each direction, and that logic is unchanged.
>>>
>>> Signed-off-by: Suman Anna <s-anna@ti.com>
>>> ---
>>> v2 changes:
>>> - add upper limit on buffers and update comments
>>> - revise patch description
>>
>> Any comments on this one, if not can you pick this up for 3.19?
> 
> Did some small changes - untested, not even compiled - can you please
> make sure it works for you?

Yep, I have reviewed and verified the changes; it is good to go.

Thanks,
Suman

* Re: [PATCHv2] rpmsg: compute number of buffers to allocate from vrings
  2014-11-26 22:30     ` Suman Anna
@ 2014-11-28 15:18       ` Ohad Ben-Cohen
  0 siblings, 0 replies; 7+ messages in thread
From: Ohad Ben-Cohen @ 2014-11-28 15:18 UTC (permalink / raw)
  To: Suman Anna; +Cc: Rusty Russell, linux-kernel, linux-omap, Dave Gerlach

On Thu, Nov 27, 2014 at 12:30 AM, Suman Anna <s-anna@ti.com> wrote:
> Yep, I have reviewed and verified the changes, it is good to go.

Applied, thanks!
