* [PATCH] rpmsg: compute number of buffers to allocate from vrings
@ 2014-07-03 20:53 ` Suman Anna
  0 siblings, 0 replies; 6+ messages in thread
From: Suman Anna @ 2014-07-03 20:53 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap, Suman Anna

The buffers to be used for communication are allocated during
the rpmsg virtio driver's probe, and the number of buffers is
currently hard-coded to 512. Remove this hard-coded value, as
this can vary from one platform to another or between different
remote processors. Instead, rely on the number of buffers the
virtqueue vring was set up with in the first place.

This fixes the WARN_ON during the setup of the receive buffers
for vrings with fewer than 512 buffers.

NOTE: The number of buffers is already assumed to be symmetrical
in each direction, and that logic is unchanged.

Signed-off-by: Suman Anna <s-anna@ti.com>
---
 drivers/rpmsg/virtio_rpmsg_bus.c | 41 +++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4..e9866a6 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -41,6 +41,7 @@
  * @svq:	tx virtqueue
  * @rbufs:	kernel address of rx buffers
  * @sbufs:	kernel address of tx buffers
+ * @num_bufs:	total number of buffers for rx and tx
  * @last_sbuf:	index of last tx buffer used
  * @bufs_dma:	dma base addr of the buffers
  * @tx_lock:	protects svq, sbufs and sleepers, to allow concurrent senders.
@@ -60,6 +61,7 @@ struct virtproc_info {
 	struct virtio_device *vdev;
 	struct virtqueue *rvq, *svq;
 	void *rbufs, *sbufs;
+	unsigned int num_bufs;
 	int last_sbuf;
 	dma_addr_t bufs_dma;
 	struct mutex tx_lock;
@@ -86,14 +88,14 @@ struct rpmsg_channel_info {
 #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
 
 /*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers is computed from the number of buffers supported
+ * by the virtqueue vring; the first half of those buffers is used
+ * for RX, and the last half for TX.
  *
  * Each buffer will have 16 bytes for the msg header and 496 bytes for
  * the payload.
  *
- * This will require a total space of 256KB for the buffers.
- *
  * We might also want to add support for user-provided buffers in time.
  * This will allow bigger buffer size flexibility, and can also be used
  * to achieve zero-copy messaging.
@@ -102,9 +104,7 @@ struct rpmsg_channel_info {
  * can change this without changing anything in the firmware of the remote
  * processor.
  */
-#define RPMSG_NUM_BUFS		(512)
 #define RPMSG_BUF_SIZE		(512)
-#define RPMSG_TOTAL_BUF_SPACE	(RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
 
 /*
  * Local addresses are dynamically allocated on-demand.
@@ -579,7 +579,7 @@ static void *get_a_tx_buf(struct virtproc_info *vrp)
 	 * either pick the next unused tx buffer
 	 * (half of our buffers are used for sending messages)
 	 */
-	if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+	if (vrp->last_sbuf < vrp->num_bufs / 2)
 		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
 	/* or recycle a used one */
 	else
@@ -948,6 +948,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	struct virtproc_info *vrp;
 	void *bufs_va;
 	int err = 0, i;
+	size_t total_buf_space;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -968,10 +969,19 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rvq = vqs[0];
 	vrp->svq = vqs[1];
 
+	/*
+	 * We expect an equal number of buffers for each direction as per
+	 * the current code, so throw a warning if the configuration doesn't
+	 * match. This can easily be adjusted if needed.
+	 */
+	vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+	WARN_ON(virtqueue_get_vring_size(vrp->svq) != (vrp->num_bufs / 2));
+	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
 	/* allocate coherent memory for the buffers */
 	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
-				RPMSG_TOTAL_BUF_SPACE,
-				&vrp->bufs_dma, GFP_KERNEL);
+				     total_buf_space, &vrp->bufs_dma,
+				     GFP_KERNEL);
 	if (!bufs_va) {
 		err = -ENOMEM;
 		goto vqs_del;
@@ -984,10 +994,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	vrp->rbufs = bufs_va;
 
 	/* and half is dedicated for TX */
-	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+	vrp->sbufs = bufs_va + total_buf_space / 2;
 
 	/* set up the receive buffers */
-	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+	for (i = 0; i < vrp->num_bufs / 2; i++) {
 		struct scatterlist sg;
 		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
 
@@ -1023,8 +1033,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	return 0;
 
 free_coherent:
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					bufs_va, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  bufs_va, vrp->bufs_dma);
 vqs_del:
 	vdev->config->del_vqs(vrp->vdev);
 free_vrp:
@@ -1042,6 +1052,7 @@ static int rpmsg_remove_device(struct device *dev, void *data)
 static void rpmsg_remove(struct virtio_device *vdev)
 {
 	struct virtproc_info *vrp = vdev->priv;
+	size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
 	int ret;
 
 	vdev->config->reset(vdev);
@@ -1057,8 +1068,8 @@ static void rpmsg_remove(struct virtio_device *vdev)
 
 	vdev->config->del_vqs(vrp->vdev);
 
-	dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
-					vrp->rbufs, vrp->bufs_dma);
+	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+			  vrp->rbufs, vrp->bufs_dma);
 
 	kfree(vrp);
 }
-- 
2.0.0


* Re: [PATCH] rpmsg: compute number of buffers to allocate from vrings
  2014-07-03 20:53 ` Suman Anna
@ 2014-08-12 15:30 ` Ohad Ben-Cohen
  2014-08-12 16:19   ` Suman Anna
  0 siblings, 1 reply; 6+ messages in thread
From: Ohad Ben-Cohen @ 2014-08-12 15:30 UTC (permalink / raw)
  To: Suman Anna; +Cc: Rusty Russell, linux-kernel, linux-omap

Hi Suman,

On Thu, Jul 3, 2014 at 11:53 PM, Suman Anna <s-anna@ti.com> wrote:
> The buffers to be used for communication are allocated during
> the rpmsg virtio driver's probe, and the number of buffers is
> currently hard-coded to 512. Remove this hard-coded value, as
> this can vary from one platform to another or between different
> remote processors. Instead, rely on the number of buffers the
> virtqueue vring was set up with in the first place.

Is there a specific problem you bumped into which you are fixing with
this approach? Can you please describe it?

I'm concerned that coupling the vring size with coherent memory
allocated by rpmsg may not be safe. It'd also be an implicit side
effect that may surprise users, so let's consider our alternatives.

Thanks,
Ohad.


* Re: [PATCH] rpmsg: compute number of buffers to allocate from vrings
  2014-08-12 15:30 ` Ohad Ben-Cohen
@ 2014-08-12 16:19   ` Suman Anna
  2014-08-13 13:40     ` Ohad Ben-Cohen
  0 siblings, 1 reply; 6+ messages in thread
From: Suman Anna @ 2014-08-12 16:19 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap

Hi Ohad,

On 08/12/2014 10:30 AM, Ohad Ben-Cohen wrote:
> Hi Suman,
> 
> On Thu, Jul 3, 2014 at 11:53 PM, Suman Anna <s-anna@ti.com> wrote:
>> The buffers to be used for communication are allocated during
>> the rpmsg virtio driver's probe, and the number of buffers is
>> currently hard-coded to 512. Remove this hard-coded value, as
>> this can vary from one platform to another or between different
>> remote processors. Instead, rely on the number of buffers the
>> virtqueue vring was set up with in the first place.
> 
> Is there a specific problem you bumped into which you are fixing with
> this approach? Can you please describe it?

Yes, I was playing around with using fewer buffers in the remoteproc
resource table for the vrings. The remoteproc virtio code creates the
vrings using the number of buffers from the .num field of struct
fw_rsc_vdev_vring in the resource table. The virtio rpmsg probe code,
though, tries to set up the receive buffers for the same virtqueue
based on the current hard-coded value of 512 buffers; since the
virtqueue was created with fewer buffers, virtqueue_add_inbuf fails
and throws a WARN_ON.


   /* set up the receive buffers */
   for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
           struct scatterlist sg;
           void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

           sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

           err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
                                     GFP_KERNEL);
           WARN_ON(err); /* sanity check; this can't really happen */
   }

> I'm concerned that coupling the vring size with coherent memory
> allocated by rpmsg may not be safe. It'd also be an implicit side
> effect that may surprise users, so let's consider our alternatives.

If anything, we are currently allocating more buffers than needed when
the configuration uses a smaller number of buffers. This can be
auto-configured properly using virtqueue_get_vring_size() on the
virtqueue the buffers are being added to.
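
(For illustration, an untested sketch of that auto-configuration,
reusing the names from the patch and assuming symmetric rx/tx vrings:)

   /* size the buffer pool from the actual vring instead of a fixed 512 */
   vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;

   /* set up the receive buffers, now bounded by the vring capacity */
   for (i = 0; i < vrp->num_bufs / 2; i++) {
           struct scatterlist sg;
           void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

           sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);
           err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
                                     GFP_KERNEL);
           WARN_ON(err); /* can no longer exceed the vring size */
   }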

regards
Suman



* Re: [PATCH] rpmsg: compute number of buffers to allocate from vrings
  2014-08-12 16:19   ` Suman Anna
@ 2014-08-13 13:40     ` Ohad Ben-Cohen
  2014-08-13 15:47       ` Suman Anna
  0 siblings, 1 reply; 6+ messages in thread
From: Ohad Ben-Cohen @ 2014-08-13 13:40 UTC (permalink / raw)
  To: Suman Anna; +Cc: Rusty Russell, linux-kernel, linux-omap

Hi Suman,

On Tue, Aug 12, 2014 at 7:19 PM, Suman Anna <s-anna@ti.com> wrote:
> Yes, I was playing around with using fewer buffers in the remoteproc
> resource table for the vrings. The remoteproc virtio code creates the
> vrings using the number of buffers from the .num field of struct
> fw_rsc_vdev_vring in the resource table. The virtio rpmsg probe code,
> though, tries to set up the receive buffers for the same virtqueue
> based on the current hard-coded value of 512 buffers; since the
> virtqueue was created with fewer buffers, virtqueue_add_inbuf fails
> and throws a WARN_ON.

Gotcha - thanks for the details.

Limiting the number of buffers in case the vrings are too small makes
sense, but let's use RPMSG_NUM_BUFS as an upper bound, so wacky
resource tables won't trigger unreasonable memory waste.

Something along the lines of:

vrp->num_bufs = min(RPMSG_NUM_BUFS, virtqueue_get_vring_size(vrp->rvq) * 2);

Should probably do the trick.
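
(One caveat, as an aside: the kernel's min() is type-checked, and
virtqueue_get_vring_size() returns an unsigned int, so a min_t() form
may be needed; an untested sketch against the patch's variable names:)

   vrp->num_bufs = min_t(unsigned int, RPMSG_NUM_BUFS,
                         virtqueue_get_vring_size(vrp->rvq) * 2);
   total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;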

Does this satisfy your requirement?

Thanks,
Ohad.


* Re: [PATCH] rpmsg: compute number of buffers to allocate from vrings
  2014-08-13 13:40     ` Ohad Ben-Cohen
@ 2014-08-13 15:47       ` Suman Anna
  0 siblings, 0 replies; 6+ messages in thread
From: Suman Anna @ 2014-08-13 15:47 UTC (permalink / raw)
  To: Ohad Ben-Cohen; +Cc: Rusty Russell, linux-kernel, linux-omap

Hi Ohad,

On 08/13/2014 08:40 AM, Ohad Ben-Cohen wrote:
> Hi Suman,
> 
> On Tue, Aug 12, 2014 at 7:19 PM, Suman Anna <s-anna@ti.com> wrote:
>> Yes, I was playing around with using fewer buffers in the remoteproc
>> resource table for the vrings. The remoteproc virtio code creates the
>> vrings using the number of buffers from the .num field of struct
>> fw_rsc_vdev_vring in the resource table. The virtio rpmsg probe code,
>> though, tries to set up the receive buffers for the same virtqueue
>> based on the current hard-coded value of 512 buffers; since the
>> virtqueue was created with fewer buffers, virtqueue_add_inbuf fails
>> and throws a WARN_ON.
> 
> Gotcha - thanks for the details.
> 
> Limiting the number of buffers in case the vrings are too small makes
> sense, but let's use RPMSG_NUM_BUFS as an upper bound, so wacky
> resource tables won't trigger unreasonable memory waste.
> 
> Something along the lines of:
> 
> vrp->num_bufs = min(RPMSG_NUM_BUFS, virtqueue_get_vring_size(vrp->rvq) * 2);
> 
> Should probably do the trick.
> 
> Does this satisfy your requirement?

Yeah, this will work for me. I will go ahead and add a WARN_ON as well
to detect the above wacky condition, and if someone really needs more
buffers in the future, we can revisit this.
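
(For reference, an untested sketch of how rpmsg_probe() could combine
the cap with the symmetry check; the names follow the patch, and the
min_t() form is an assumption here, not the final merged code:)

   /* we expect symmetric tx/rx vrings */
   WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
           virtqueue_get_vring_size(vrp->svq));

   /* use fewer buffers if the vrings are smaller than the default 512 */
   vrp->num_bufs = min_t(unsigned int, RPMSG_NUM_BUFS,
                         virtqueue_get_vring_size(vrp->rvq) * 2);
   total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;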

regards
Suman
