* [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue
@ 2010-06-20 21:24 Linus Walleij
  2010-06-29  8:29 ` CoffBeta
  0 siblings, 1 reply; 5+ messages in thread
From: Linus Walleij @ 2010-06-20 21:24 UTC (permalink / raw)
  To: Dan Williams; +Cc: linux-kernel, Per Friden, Jonas Aaberg, Linus Walleij

From: Per Friden <per.friden@stericsson.com>

Fixed the lli_max=1 issue in the case of a full lcla; currently this
case is not properly handled.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
---
 drivers/dma/ste_dma40.c |   62 ++++++++++++++++++++++-------------------------
 1 files changed, 29 insertions(+), 33 deletions(-)

diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c426829..4618d6c 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -81,9 +81,10 @@ struct d40_lli_pool {
  * lli_len equals one.
  * @lli_log: Same as above but for logical channels.
  * @lli_pool: The pool with two entries pre-allocated.
- * @lli_len: Number of LLI's in lli_pool
- * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len
- * then this transfer job is done.
+ * @lli_len: Number of LLIs in the current descriptor.
+ * @lli_count: Number of LLIs transferred so far.
+ * @lli_tx_len: Max number of LLIs per transfer; there can be
+ * many transfers for one descriptor.
  * @txd: DMA engine struct. Used for among other things for communication
  * during a transfer.
  * @node: List entry.
@@ -100,8 +101,9 @@ struct d40_desc {
 	struct d40_log_lli_bidir	 lli_log;
 
 	struct d40_lli_pool		 lli_pool;
-	u32				 lli_len;
-	u32				 lli_tcount;
+	int				 lli_len;
+	int				 lli_count;
+	u32				 lli_tx_len;
 
 	struct dma_async_tx_descriptor	 txd;
 	struct list_head		 node;
@@ -365,11 +367,6 @@ static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
 	return cookie;
 }
 
-static void d40_desc_reset(struct d40_desc *d40d)
-{
-	d40d->lli_tcount = 0;
-}
-
 static void d40_desc_remove(struct d40_desc *d40d)
 {
 	list_del(&d40d->node);
@@ -738,25 +735,18 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 				  d40c->phy_chan->num,
 				  d40d->lli_phy.dst,
 				  d40d->lli_phy.src);
-		d40d->lli_tcount = d40d->lli_len;
 	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
-		u32 lli_len;
 		struct d40_log_lli *src = d40d->lli_log.src;
 		struct d40_log_lli *dst = d40d->lli_log.dst;
 
-		src += d40d->lli_tcount;
-		dst += d40d->lli_tcount;
-
-		if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
-			lli_len = d40d->lli_len;
-		else
-			lli_len = d40c->base->plat_data->llis_per_log;
-		d40d->lli_tcount += lli_len;
+		src += d40d->lli_count;
+		dst += d40d->lli_count;
 		d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
 				  d40c->lcla.dst,
 				  dst, src,
 				  d40c->base->plat_data->llis_per_log);
 	}
+	d40d->lli_count += d40d->lli_tx_len;
 }
 
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -838,7 +828,7 @@ static void dma_tc_handle(struct d40_chan *d40c)
 	if (d40d == NULL)
 		return;
 
-	if (d40d->lli_tcount < d40d->lli_len) {
+	if (d40d->lli_count < d40d->lli_len) {
 
 		d40_desc_load(d40c, d40d);
 		/* Start dma job */
@@ -891,7 +881,6 @@ static void dma_tasklet(unsigned long data)
 		/* Return desc to free-list */
 		d40_desc_free(d40c, d40d_fin);
 	} else {
-		d40_desc_reset(d40d_fin);
 		if (!d40d_fin->is_in_client_list) {
 			d40_desc_remove(d40d_fin);
 			list_add_tail(&d40d_fin->node, &d40c->client);
@@ -1573,7 +1562,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	struct d40_chan *d40c = container_of(chan, struct d40_chan,
 					     chan);
 	unsigned long flg;
-	int lli_max = d40c->base->plat_data->llis_per_log;
 
 
 	spin_lock_irqsave(&d40c->lock, flg);
@@ -1584,10 +1572,13 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 
 	memset(d40d, 0, sizeof(struct d40_desc));
 	d40d->lli_len = sgl_len;
-
+	d40d->lli_tx_len = d40d->lli_len;
 	d40d->txd.flags = flags;
 
 	if (d40c->log_num != D40_PHY_CHAN) {
+		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
+			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
+
 		if (sgl_len > 1)
 			/*
 			 * Check if there is space available in lcla. If not,
@@ -1596,7 +1587,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 			 */
 			if (d40_lcla_id_get(d40c,
 					    &d40c->base->lcla_pool) != 0)
-				lli_max = 1;
+				d40d->lli_tx_len = 1;
 
 		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
 			dev_err(&d40c->chan.dev->device,
@@ -1610,7 +1601,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 d40d->lli_log.src,
 					 d40c->log_def.lcsp1,
 					 d40c->dma_cfg.src_info.data_width,
-					 flags & DMA_PREP_INTERRUPT, lli_max,
+					 flags & DMA_PREP_INTERRUPT,
+					 d40d->lli_tx_len,
 					 d40c->base->plat_data->llis_per_log);
 
 		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
@@ -1619,7 +1611,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 					 d40d->lli_log.dst,
 					 d40c->log_def.lcsp3,
 					 d40c->dma_cfg.dst_info.data_width,
-					 flags & DMA_PREP_INTERRUPT, lli_max,
+					 flags & DMA_PREP_INTERRUPT,
+					 d40d->lli_tx_len,
 					 d40c->base->plat_data->llis_per_log);
 
 
@@ -1794,6 +1787,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 			goto err;
 		}
 		d40d->lli_len = 1;
+		d40d->lli_tx_len = 1;
 
 		d40_log_fill_lli(d40d->lli_log.src,
 				 src,
@@ -1869,7 +1863,6 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 {
 	dma_addr_t dev_addr = 0;
 	int total_size;
-	int lli_max = d40c->base->plat_data->llis_per_log;
 
 	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
 		dev_err(&d40c->chan.dev->device,
@@ -1878,7 +1871,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sg_len;
-	d40d->lli_tcount = 0;
+	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+		d40d->lli_tx_len = d40d->lli_len;
+	else
+		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
 
 	if (sg_len > 1)
 		/*
@@ -1887,7 +1883,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 		 * in lcpa space.
 		 */
 		if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
-			lli_max = 1;
+			d40d->lli_tx_len = 1;
 
 	if (direction == DMA_FROM_DEVICE) {
 		dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
@@ -1899,7 +1895,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 					       d40c->dma_cfg.dst_info.data_width,
 					       direction,
 					       flags & DMA_PREP_INTERRUPT,
-					       dev_addr, lli_max,
+					       dev_addr, d40d->lli_tx_len,
 					       d40c->base->plat_data->llis_per_log);
 	} else if (direction == DMA_TO_DEVICE) {
 		dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
@@ -1911,7 +1907,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 					       d40c->dma_cfg.dst_info.data_width,
 					       direction,
 					       flags & DMA_PREP_INTERRUPT,
-					       dev_addr, lli_max,
+					       dev_addr, d40d->lli_tx_len,
 					       d40c->base->plat_data->llis_per_log);
 	} else
 		return -EINVAL;
@@ -1939,7 +1935,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	}
 
 	d40d->lli_len = sgl_len;
-	d40d->lli_tcount = 0;
+	d40d->lli_tx_len = sgl_len;
 
 	if (direction == DMA_FROM_DEVICE) {
 		dst_dev_addr = 0;
-- 
1.6.3.3
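
The mechanics in brief: the patch replaces the function-local lli_max with a
per-descriptor lli_tx_len, so the "one LLI per transfer" fallback chosen when
the LCLA is full is remembered across reloads instead of being lost after the
first load. A minimal sketch of that bookkeeping follows, using hypothetical
names rather than the driver's actual structures:

#include <stdbool.h>

struct desc {
	int lli_len;    /* total LLIs in this descriptor */
	int lli_count;  /* LLIs loaded so far */
	int lli_tx_len; /* max LLIs per transfer; 1 when the LCLA is full */
};

/* Program the next chunk of LLIs and advance the progress counter. */
static void desc_load(struct desc *d)
{
	/* hardware is handed LLIs [lli_count, lli_count + lli_tx_len) */
	d->lli_count += d->lli_tx_len;
}

/* Called per terminal-count interrupt: returns true when the job is done. */
static bool tc_handle(struct desc *d)
{
	if (d->lli_count < d->lli_len) {
		desc_load(d);   /* more chunks remain: restart the job */
		return false;
	}
	return true;            /* descriptor complete */
}

In the driver itself this corresponds to dma_tc_handle() calling
d40_desc_load() until lli_count reaches lli_len, as seen in the hunks above.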



* Re: [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue
  2010-06-20 21:24 [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue Linus Walleij
@ 2010-06-29  8:29 ` CoffBeta
  2010-06-29 11:31   ` Linus Walleij
  0 siblings, 1 reply; 5+ messages in thread
From: CoffBeta @ 2010-06-29  8:29 UTC (permalink / raw)
  To: Linus Walleij; +Cc: Dan Williams, linux-kernel, Per Friden, Jonas Aaberg

 Maybe this, then, for drivers which cannot accept sleeping GPIOs:

 if (gpio_cansleep(some_gpio)) {
         dev_err(&dev, "This driver only supports non-sleeping gpios\n");
         return -EINVAL;
 }

 err = gpio_request(some_gpio, "some_gpio");

I think ideally the caller should specify this to gpio_request via a flags argument, i.e.:

 #define GPIOF_NO_SLEEP        0x0
 #define GPIOF_CANSLEEP        0x1

 err = gpio_request(some_gpio, "some_gpio", GPIOF_NO_SLEEP);
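
For completeness, a minimal probe-time sketch of the first pattern using only
the existing two-argument API (the "example" driver name and the GPIO number
are made up):

 #include <linux/kernel.h>
 #include <linux/gpio.h>

 /* Hypothetical driver: refuse any GPIO whose accessors may sleep. */
 static int example_claim_gpio(unsigned int gpio)
 {
         int err;

         if (gpio_cansleep(gpio)) {
                 pr_err("example: gpio %u may sleep, unusable here\n", gpio);
                 return -EINVAL;
         }

         err = gpio_request(gpio, "example");
         if (err)
                 return err;

         /* gpio_get_value()/gpio_set_value() are now safe in atomic context */
         return 0;
 }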


On Mon, Jun 21, 2010 at 05:24, Linus Walleij
<linus.walleij@stericsson.com> wrote:
> From: Per Friden <per.friden@stericsson.com>
>
> Fixed the lli_max=1 issue in the case of a full lcla; currently this
> case is not properly handled.
>
> Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
> Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
> [...]


* Re: [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue
  2010-06-29  8:29 ` CoffBeta
@ 2010-06-29 11:31   ` Linus Walleij
  2010-06-30  7:54     ` CoffBeta
  0 siblings, 1 reply; 5+ messages in thread
From: Linus Walleij @ 2010-06-29 11:31 UTC (permalink / raw)
  To: CoffBeta; +Cc: Dan Williams, linux-kernel, Per Friden, Jonas Aaberg

2010/6/29 CoffBeta <coffbeta@gmail.com>:

>  maybe this then for drivers which cannot accept sleeping gpios:

What? This is a patch to a DMA driver.

Are you pushing the wrong key or a new type of spam robot? :-?

Yours,
Linus Walleij


* Re: [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue
  2010-06-29 11:31   ` Linus Walleij
@ 2010-06-30  7:54     ` CoffBeta
  2010-06-30  7:57       ` CoffBeta
  0 siblings, 1 reply; 5+ messages in thread
From: CoffBeta @ 2010-06-30  7:54 UTC (permalink / raw)
  To: Linus Walleij; +Cc: Dan Williams, linux-kernel, Per Friden, Jonas Aaberg

yes

On Tue, Jun 29, 2010 at 19:31, Linus Walleij <linus.ml.walleij@gmail.com> wrote:
> 2010/6/29 CoffBeta <coffbeta@gmail.com>:
>
>>  maybe this then for drivers which cannot accept sleeping gpios:
>
> What? This is a patch to a DMA driver.
>
> Are you pushing the wrong key or a new type of spam robot? :-?
>
> Yours,
> Linus Walleij
>


* Re: [PATCH 01/19] DMAENGINE: ste_dma40: fixed lli_max=1 issue
  2010-06-30  7:54     ` CoffBeta
@ 2010-06-30  7:57       ` CoffBeta
  0 siblings, 0 replies; 5+ messages in thread
From: CoffBeta @ 2010-06-30  7:57 UTC (permalink / raw)
  To: Linus Walleij; +Cc: Dan Williams, linux-kernel, Per Friden, Jonas Aaberg

Where is the Apollo-1 Pro NAND flash start address??? I can't find it.

