All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] crypto: caam - power management support for caam job-ring
@ 2014-03-20 18:50 Yashpal Dutta
  2014-03-21  5:54 ` Ruchika Gupta
  2014-03-22 16:24 ` Ben Hutchings
  0 siblings, 2 replies; 5+ messages in thread
From: Yashpal Dutta @ 2014-03-20 18:50 UTC (permalink / raw)
  To: linux-crypto, horia.geanta, vakul, ruchika.gupta; +Cc: Yashpal Dutta, stable

Job ring is suspended gracefully and resumed afresh.

Both Sleep (where device will remain powered-on) and Deep-sleep (where
device will be powered-down) are handled gracefully. Persistence sessions
are not supported across deep-sleep.

Cc: stable@vger.kernel.org
Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
---
 drivers/crypto/caam/intern.h |   2 +
 drivers/crypto/caam/jr.c     | 257 +++++++++++++++++++++++++++++++------------
 2 files changed, 190 insertions(+), 69 deletions(-)

diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 6d85fcc..0d41d05 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -54,6 +54,8 @@ struct caam_drv_private_jr {
 	int inp_ring_write_index;	/* Input index "tail" */
 	int head;			/* entinfo (s/w ring) head index */
 	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
+	dma_addr_t inpbusaddr;	/* Input ring physical address */
+	dma_addr_t outbusaddr;	/* Output ring physical address */
 	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
 	int out_ring_read_index;	/* Output index "tail" */
 	int tail;			/* entinfo (s/w ring) tail index */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1d80bd3..2a79218 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -68,7 +68,6 @@ static int caam_reset_hw_jr(struct device *dev)
 int caam_jr_shutdown(struct device *dev)
 {
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-	dma_addr_t inpbusaddr, outbusaddr;
 	int ret;
 
 	ret = caam_reset_hw_jr(dev);
@@ -78,13 +77,10 @@ int caam_jr_shutdown(struct device *dev)
 	/* Release interrupt */
 	free_irq(jrp->irq, dev);
 
-	/* Free rings */
-	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
-	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
 	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-			  jrp->inpring, inpbusaddr);
+			  jrp->inpring, jrp->inpbusaddr);
 	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
-			  jrp->outring, outbusaddr);
+			  jrp->outring, jrp->outbusaddr);
 	kfree(jrp->entinfo);
 
 	return ret;
@@ -159,78 +155,82 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 	return IRQ_HANDLED;
 }
 
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void caam_jr_dequeue(unsigned long devarg)
+/* Consume the processed output ring Job */
+static inline void caam_jr_consume(struct device *dev)
 {
 	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = (struct device *)devarg;
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
 	void *userarg;
 
-	while (rd_reg32(&jrp->rregs->outring_used)) {
+	head = ACCESS_ONCE(jrp->head);
+	spin_lock(&jrp->outlock);
 
-		head = ACCESS_ONCE(jrp->head);
+	sw_idx = tail = jrp->tail;
+	hw_idx = jrp->out_ring_read_index;
 
-		spin_lock(&jrp->outlock);
+	for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+		sw_idx = (tail + i) & (JOBR_DEPTH - 1);
 
-		sw_idx = tail = jrp->tail;
-		hw_idx = jrp->out_ring_read_index;
+		smp_read_barrier_depends();
+		if (jrp->outring[hw_idx].desc ==
+		    jrp->entinfo[sw_idx].desc_addr_dma)
+			break; /* found */
+	}
+	/* we should never fail to find a matching descriptor */
+	BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
-		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
-			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+	/* Unmap just-run descriptor so we can post-process */
+	dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+			 jrp->entinfo[sw_idx].desc_size,
+			 DMA_TO_DEVICE);
 
-			smp_read_barrier_depends();
+	/* mark completed, avoid matching on a recycled desc addr */
+	jrp->entinfo[sw_idx].desc_addr_dma = 0;
 
-			if (jrp->outring[hw_idx].desc ==
-			    jrp->entinfo[sw_idx].desc_addr_dma)
-				break; /* found */
-		}
-		/* we should never fail to find a matching descriptor */
-		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
-
-		/* Unmap just-run descriptor so we can post-process */
-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
-				 jrp->entinfo[sw_idx].desc_size,
-				 DMA_TO_DEVICE);
-
-		/* mark completed, avoid matching on a recycled desc addr */
-		jrp->entinfo[sw_idx].desc_addr_dma = 0;
-
-		/* Stash callback params for use outside of lock */
-		usercall = jrp->entinfo[sw_idx].callbk;
-		userarg = jrp->entinfo[sw_idx].cbkarg;
-		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
-		userstatus = jrp->outring[hw_idx].jrstatus;
-
-		/* set done */
-		wr_reg32(&jrp->rregs->outring_rmvd, 1);
-
-		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
-					   (JOBR_DEPTH - 1);
-
-		/*
-		 * if this job completed out-of-order, do not increment
-		 * the tail.  Otherwise, increment tail by 1 plus the
-		 * number of subsequent jobs already completed out-of-order
-		 */
-		if (sw_idx == tail) {
-			do {
-				tail = (tail + 1) & (JOBR_DEPTH - 1);
-				smp_read_barrier_depends();
-			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
-				 jrp->entinfo[tail].desc_addr_dma == 0);
-
-			jrp->tail = tail;
-		}
+	/* Stash callback params for use outside of lock */
+	usercall = jrp->entinfo[sw_idx].callbk;
+	userarg = jrp->entinfo[sw_idx].cbkarg;
+	userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+	userstatus = jrp->outring[hw_idx].jrstatus;
 
-		spin_unlock(&jrp->outlock);
+	/* set done */
+	wr_reg32(&jrp->rregs->outring_rmvd, 1);
 
-		/* Finally, execute user's callback */
-		usercall(dev, userdesc, userstatus, userarg);
+	jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
+				   (JOBR_DEPTH - 1);
+
+	/*
+	 * if this job completed out-of-order, do not increment
+	 * the tail.  Otherwise, increment tail by 1 plus the
+	 * number of subsequent jobs already completed out-of-order
+	 */
+	if (sw_idx == tail) {
+		do {
+			tail = (tail + 1) & (JOBR_DEPTH - 1);
+			smp_read_barrier_depends();
+		} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+			 jrp->entinfo[tail].desc_addr_dma == 0);
+
+		jrp->tail = tail;
 	}
 
+	spin_unlock(&jrp->outlock);
+
+	/* Finally, execute user's callback */
+	usercall(dev, userdesc, userstatus, userarg);
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
+{
+	struct device *dev = (struct device *)devarg;
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+	while (rd_reg32(&jrp->rregs->outring_used))
+		caam_jr_consume(dev);
+
 	/* reenable / unmask IRQs */
 	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
 }
@@ -368,13 +368,131 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 }
 EXPORT_SYMBOL(caam_jr_enqueue);
 
+#ifdef CONFIG_PM
+/* Return Failure for Job pending in input ring */
+static void caam_fail_inpjobs(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+	u32 *userdesc;
+	void *userarg;
+	int sw_idx;
+
+	/* Check for jobs left after reaching output ring and return error */
+	for (sw_idx = 0; sw_idx < JOBR_DEPTH; sw_idx++) {
+		if (jrp->entinfo[sw_idx].desc_addr_dma != 0) {
+			usercall = jrp->entinfo[sw_idx].callbk;
+			userarg = jrp->entinfo[sw_idx].cbkarg;
+			userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+			usercall(dev, userdesc, -EIO, userarg);
+			jrp->entinfo[sw_idx].desc_addr_dma = 0;
+		}
+	}
+}
+
+/* Suspend handler for Job Ring */
+static int jr_suspend(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+	unsigned int timeout = 100000;
+	int ret = 0;
+
+	/*
+	 * mask interrupts since we are going to poll
+	 * for reset completion status
+	 */
+	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+
+	/*
+	 * Cleanup all the pending completed Jobs to make room for
+	 * in Job's coming to Outring during flush
+	 */
+	while (rd_reg32(&jrp->rregs->outring_used))
+		caam_jr_consume(dev);
+
+	/* initiate flush (required prior to reset) */
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+		JRINT_ERR_HALT_INPROGRESS) && --timeout)
+		cpu_relax();
+
+	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+		ret = -EIO;
+		goto err;
+	}
+
+	/*
+	 * Disallow any further addition in Job Ring by making input_ring
+	 * size ZERO. If output complete ring processing try to enqueue
+	 * more Job's back to JR, it will return -EBUSY
+	 */
+	wr_reg32(&jrp->rregs->inpring_size, 0);
+
+	while (rd_reg32(&jrp->rregs->outring_used))
+		caam_jr_consume(dev);
+
+	caam_fail_inpjobs(dev);
+
+	/* initiate reset */
+	timeout = 100000;
+	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+		ret = -EIO;
+		goto err;
+	}
+
+err:
+	/* unmask interrupts */
+	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+	return ret;
+}
+
+/* Resume handler for Job Ring */
+static int jr_resume(struct device *dev)
+{
+	struct caam_drv_private_jr *jrp;
+
+	jrp = dev_get_drvdata(dev);
+
+	memset(jrp->entinfo, 0, sizeof(struct caam_jrentry_info) * JOBR_DEPTH);
+
+	/* Setup rings */
+	jrp->inp_ring_write_index = 0;
+	jrp->out_ring_read_index = 0;
+	jrp->head = 0;
+	jrp->tail = 0;
+
+	/* Setup ring base registers */
+	wr_reg64(&jrp->rregs->inpring_base, jrp->inpbusaddr);
+	wr_reg64(&jrp->rregs->outring_base, jrp->outbusaddr);
+	/* Setup ring size */
+	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
+		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+	return 0;
+}
+
+const struct dev_pm_ops jr_pm_ops = {
+	.suspend = jr_suspend,
+	.resume = jr_resume,
+};
+#endif /* CONFIG_PM */
+
 /*
  * Init JobR independent of platform property detection
  */
 static int caam_jr_init(struct device *dev)
 {
 	struct caam_drv_private_jr *jrp;
-	dma_addr_t inpbusaddr, outbusaddr;
 	int i, error;
 
 	jrp = dev_get_drvdata(dev);
@@ -397,10 +515,11 @@ static int caam_jr_init(struct device *dev)
 		return error;
 
 	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-					  &inpbusaddr, GFP_KERNEL);
+					  &jrp->inpbusaddr, GFP_KERNEL);
 
 	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
-					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+					  JOBR_DEPTH, &jrp->outbusaddr,
+					  GFP_KERNEL);
 
 	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
 			       GFP_KERNEL);
@@ -412,17 +531,14 @@ static int caam_jr_init(struct device *dev)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < JOBR_DEPTH; i++)
-		jrp->entinfo[i].desc_addr_dma = !0;
-
 	/* Setup rings */
 	jrp->inp_ring_write_index = 0;
 	jrp->out_ring_read_index = 0;
 	jrp->head = 0;
 	jrp->tail = 0;
 
-	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
-	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+	wr_reg64(&jrp->rregs->inpring_base, jrp->inpbusaddr);
+	wr_reg64(&jrp->rregs->outring_base, jrp->outbusaddr);
 	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
 	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
 
@@ -518,6 +634,9 @@ static struct platform_driver caam_jr_driver = {
 		.name = "caam_jr",
 		.owner = THIS_MODULE,
 		.of_match_table = caam_jr_match,
+#ifdef CONFIG_PM
+		.pm = &jr_pm_ops,
+#endif
 	},
 	.probe       = caam_jr_probe,
 	.remove      = caam_jr_remove,
-- 
1.8.1.2

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* RE: [PATCH v3] crypto: caam - power management support for caam job-ring
  2014-03-20 18:50 [PATCH v3] crypto: caam - power management support for caam job-ring Yashpal Dutta
@ 2014-03-21  5:54 ` Ruchika Gupta
  2014-03-22 16:24 ` Ben Hutchings
  1 sibling, 0 replies; 5+ messages in thread
From: Ruchika Gupta @ 2014-03-21  5:54 UTC (permalink / raw)
  To: yashpal.dutta, linux-crypto, Horia.Geanta, Vakul Garg
  Cc: yashpal.dutta, stable

Acked-by: Ruchika Gupta <ruchika.gupta@freescale.com>

> -----Original Message-----
> From: Yashpal Dutta [mailto:yashpal.dutta@freescale.com]
> Sent: Friday, March 21, 2014 12:21 AM
> To: linux-crypto@vger.kernel.org; Geanta Neag Horia Ioan-B05471; Garg Vakul-
> B16394; Gupta Ruchika-R66431
> Cc: Dutta Yashpal-B05456; stable@vger.kernel.org
> Subject: [PATCH v3] crypto: caam - power management support for caam job-
> ring
> 
> Job ring is suspended gracefully and resume afresh.
> 
> Both Sleep (where device will remain powered-on) and Deep-sleep (where
> device will be powered-down are handled gracefully. Persistance sessions are
> not supported across deep-sleep.
> 
> Cc: stable@vger.kernel.org
> Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
> ---
>  drivers/crypto/caam/intern.h |   2 +
>  drivers/crypto/caam/jr.c     | 257 +++++++++++++++++++++++++++++++---------
> ---
>  2 files changed, 190 insertions(+), 69 deletions(-)
> 
> diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
> index 6d85fcc..0d41d05 100644
> --- a/drivers/crypto/caam/intern.h
> +++ b/drivers/crypto/caam/intern.h
> @@ -54,6 +54,8 @@ struct caam_drv_private_jr {
>  	int inp_ring_write_index;	/* Input index "tail" */
>  	int head;			/* entinfo (s/w ring) head index */
>  	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
> +	dma_addr_t inpbusaddr;	/* Input ring physical address */
> +	dma_addr_t outbusaddr;	/* Output ring physical address */
>  	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock
> */
>  	int out_ring_read_index;	/* Output index "tail" */
>  	int tail;			/* entinfo (s/w ring) tail index */
> diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index
> 1d80bd3..2a79218 100644
> --- a/drivers/crypto/caam/jr.c
> +++ b/drivers/crypto/caam/jr.c
> @@ -68,7 +68,6 @@ static int caam_reset_hw_jr(struct device *dev)  int
> caam_jr_shutdown(struct device *dev)  {
>  	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
> -	dma_addr_t inpbusaddr, outbusaddr;
>  	int ret;
> 
>  	ret = caam_reset_hw_jr(dev);
> @@ -78,13 +77,10 @@ int caam_jr_shutdown(struct device *dev)
>  	/* Release interrupt */
>  	free_irq(jrp->irq, dev);
> 
> -	/* Free rings */
> -	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
> -	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
>  	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
> -			  jrp->inpring, inpbusaddr);
> +			  jrp->inpring, jrp->inpbusaddr);
>  	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
> -			  jrp->outring, outbusaddr);
> +			  jrp->outring, jrp->outbusaddr);
>  	kfree(jrp->entinfo);
> 
>  	return ret;
> @@ -159,78 +155,82 @@ static irqreturn_t caam_jr_interrupt(int irq, void
> *st_dev)
>  	return IRQ_HANDLED;
>  }
> 
> -/* Deferred service handler, run as interrupt-fired tasklet */ -static void
> caam_jr_dequeue(unsigned long devarg)
> +/* Consume the processed output ring Job */ static inline void
> +caam_jr_consume(struct device *dev)
>  {
>  	int hw_idx, sw_idx, i, head, tail;
> -	struct device *dev = (struct device *)devarg;
>  	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
>  	void (*usercall)(struct device *dev, u32 *desc, u32 status, void
> *arg);
>  	u32 *userdesc, userstatus;
>  	void *userarg;
> 
> -	while (rd_reg32(&jrp->rregs->outring_used)) {
> +	head = ACCESS_ONCE(jrp->head);
> +	spin_lock(&jrp->outlock);
> 
> -		head = ACCESS_ONCE(jrp->head);
> +	sw_idx = tail = jrp->tail;
> +	hw_idx = jrp->out_ring_read_index;
> 
> -		spin_lock(&jrp->outlock);
> +	for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
> +		sw_idx = (tail + i) & (JOBR_DEPTH - 1);
> 
> -		sw_idx = tail = jrp->tail;
> -		hw_idx = jrp->out_ring_read_index;
> +		smp_read_barrier_depends();
> +		if (jrp->outring[hw_idx].desc ==
> +		    jrp->entinfo[sw_idx].desc_addr_dma)
> +			break; /* found */
> +	}
> +	/* we should never fail to find a matching descriptor */
> +	BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
> 
> -		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
> -			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
> +	/* Unmap just-run descriptor so we can post-process */
> +	dma_unmap_single(dev, jrp->outring[hw_idx].desc,
> +			 jrp->entinfo[sw_idx].desc_size,
> +			 DMA_TO_DEVICE);
> 
> -			smp_read_barrier_depends();
> +	/* mark completed, avoid matching on a recycled desc addr */
> +	jrp->entinfo[sw_idx].desc_addr_dma = 0;
> 
> -			if (jrp->outring[hw_idx].desc ==
> -			    jrp->entinfo[sw_idx].desc_addr_dma)
> -				break; /* found */
> -		}
> -		/* we should never fail to find a matching descriptor */
> -		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
> -
> -		/* Unmap just-run descriptor so we can post-process */
> -		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
> -				 jrp->entinfo[sw_idx].desc_size,
> -				 DMA_TO_DEVICE);
> -
> -		/* mark completed, avoid matching on a recycled desc addr */
> -		jrp->entinfo[sw_idx].desc_addr_dma = 0;
> -
> -		/* Stash callback params for use outside of lock */
> -		usercall = jrp->entinfo[sw_idx].callbk;
> -		userarg = jrp->entinfo[sw_idx].cbkarg;
> -		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
> -		userstatus = jrp->outring[hw_idx].jrstatus;
> -
> -		/* set done */
> -		wr_reg32(&jrp->rregs->outring_rmvd, 1);
> -
> -		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
> -					   (JOBR_DEPTH - 1);
> -
> -		/*
> -		 * if this job completed out-of-order, do not increment
> -		 * the tail.  Otherwise, increment tail by 1 plus the
> -		 * number of subsequent jobs already completed out-of-order
> -		 */
> -		if (sw_idx == tail) {
> -			do {
> -				tail = (tail + 1) & (JOBR_DEPTH - 1);
> -				smp_read_barrier_depends();
> -			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
> -				 jrp->entinfo[tail].desc_addr_dma == 0);
> -
> -			jrp->tail = tail;
> -		}
> +	/* Stash callback params for use outside of lock */
> +	usercall = jrp->entinfo[sw_idx].callbk;
> +	userarg = jrp->entinfo[sw_idx].cbkarg;
> +	userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
> +	userstatus = jrp->outring[hw_idx].jrstatus;
> 
> -		spin_unlock(&jrp->outlock);
> +	/* set done */
> +	wr_reg32(&jrp->rregs->outring_rmvd, 1);
> 
> -		/* Finally, execute user's callback */
> -		usercall(dev, userdesc, userstatus, userarg);
> +	jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
> +				   (JOBR_DEPTH - 1);
> +
> +	/*
> +	 * if this job completed out-of-order, do not increment
> +	 * the tail.  Otherwise, increment tail by 1 plus the
> +	 * number of subsequent jobs already completed out-of-order
> +	 */
> +	if (sw_idx == tail) {
> +		do {
> +			tail = (tail + 1) & (JOBR_DEPTH - 1);
> +			smp_read_barrier_depends();
> +		} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
> +			 jrp->entinfo[tail].desc_addr_dma == 0);
> +
> +		jrp->tail = tail;
>  	}
> 
> +	spin_unlock(&jrp->outlock);
> +
> +	/* Finally, execute user's callback */
> +	usercall(dev, userdesc, userstatus, userarg); }
> +
> +/* Deferred service handler, run as interrupt-fired tasklet */ static
> +void caam_jr_dequeue(unsigned long devarg) {
> +	struct device *dev = (struct device *)devarg;
> +	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
> +
> +	while (rd_reg32(&jrp->rregs->outring_used))
> +		caam_jr_consume(dev);
> +
>  	/* reenable / unmask IRQs */
>  	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);  } @@ -368,13
> +368,131 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,  }
> EXPORT_SYMBOL(caam_jr_enqueue);
> 
> +#ifdef CONFIG_PM
> +/* Return Failure for Job pending in input ring */ static void
> +caam_fail_inpjobs(struct device *dev) {
> +	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
> +	void (*usercall)(struct device *dev, u32 *desc, u32 status, void
> *arg);
> +	u32 *userdesc;
> +	void *userarg;
> +	int sw_idx;
> +
> +	/* Check for jobs left after reaching output ring and return error */
> +	for (sw_idx = 0; sw_idx < JOBR_DEPTH; sw_idx++) {
> +		if (jrp->entinfo[sw_idx].desc_addr_dma != 0) {
> +			usercall = jrp->entinfo[sw_idx].callbk;
> +			userarg = jrp->entinfo[sw_idx].cbkarg;
> +			userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
> +			usercall(dev, userdesc, -EIO, userarg);
> +			jrp->entinfo[sw_idx].desc_addr_dma = 0;
> +		}
> +	}
> +}
> +
> +/* Suspend handler for Job Ring */
> +static int jr_suspend(struct device *dev) {
> +	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
> +	unsigned int timeout = 100000;
> +	int ret = 0;
> +
> +	/*
> +	 * mask interrupts since we are going to poll
> +	 * for reset completion status
> +	 */
> +	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
> +
> +	/*
> +	 * Cleanup all the pending completed Jobs to make room for
> +	 * in Job's coming to Outring during flush
> +	 */
> +	while (rd_reg32(&jrp->rregs->outring_used))
> +		caam_jr_consume(dev);
> +
> +	/* initiate flush (required prior to reset) */
> +	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
> +	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
> +		JRINT_ERR_HALT_INPROGRESS) && --timeout)
> +		cpu_relax();
> +
> +	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
> +	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
> +		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
> +		ret = -EIO;
> +		goto err;
> +	}
> +
> +	/*
> +	 * Disallow any further addition in Job Ring by making input_ring
> +	 * size ZERO. If output complete ring processing try to enqueue
> +	 * more Job's back to JR, it will return -EBUSY
> +	 */
> +	wr_reg32(&jrp->rregs->inpring_size, 0);
> +
> +	while (rd_reg32(&jrp->rregs->outring_used))
> +		caam_jr_consume(dev);
> +
> +	caam_fail_inpjobs(dev);
> +
> +	/* initiate reset */
> +	timeout = 100000;
> +	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
> +	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
> +		cpu_relax();
> +
> +	if (timeout == 0) {
> +		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
> +		ret = -EIO;
> +		goto err;
> +	}
> +
> +err:
> +	/* unmask interrupts */
> +	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
> +	return ret;
> +}
> +
> +/* Resume handler for Job Ring */
> +static int jr_resume(struct device *dev) {
> +	struct caam_drv_private_jr *jrp;
> +
> +	jrp = dev_get_drvdata(dev);
> +
> +	memset(jrp->entinfo, 0, sizeof(struct caam_jrentry_info) *
> +JOBR_DEPTH);
> +
> +	/* Setup rings */
> +	jrp->inp_ring_write_index = 0;
> +	jrp->out_ring_read_index = 0;
> +	jrp->head = 0;
> +	jrp->tail = 0;
> +
> +	/* Setup ring base registers */
> +	wr_reg64(&jrp->rregs->inpring_base, jrp->inpbusaddr);
> +	wr_reg64(&jrp->rregs->outring_base, jrp->outbusaddr);
> +	/* Setup ring size */
> +	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
> +	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
> +
> +	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
> +		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
> +		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
> +	return 0;
> +}
> +
> +const struct dev_pm_ops jr_pm_ops = {
> +	.suspend = jr_suspend,
> +	.resume = jr_resume,
> +};
> +#endif /* CONFIG_PM */
> +
>  /*
>   * Init JobR independent of platform property detection
>   */
>  static int caam_jr_init(struct device *dev)  {
>  	struct caam_drv_private_jr *jrp;
> -	dma_addr_t inpbusaddr, outbusaddr;
>  	int i, error;
> 
>  	jrp = dev_get_drvdata(dev);
> @@ -397,10 +515,11 @@ static int caam_jr_init(struct device *dev)
>  		return error;
> 
>  	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) *
> JOBR_DEPTH,
> -					  &inpbusaddr, GFP_KERNEL);
> +					  &jrp->inpbusaddr, GFP_KERNEL);
> 
>  	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
> -					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
> +					  JOBR_DEPTH, &jrp->outbusaddr,
> +					  GFP_KERNEL);
> 
>  	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
>  			       GFP_KERNEL);
> @@ -412,17 +531,14 @@ static int caam_jr_init(struct device *dev)
>  		return -ENOMEM;
>  	}
> 
> -	for (i = 0; i < JOBR_DEPTH; i++)
> -		jrp->entinfo[i].desc_addr_dma = !0;
> -
>  	/* Setup rings */
>  	jrp->inp_ring_write_index = 0;
>  	jrp->out_ring_read_index = 0;
>  	jrp->head = 0;
>  	jrp->tail = 0;
> 
> -	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
> -	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
> +	wr_reg64(&jrp->rregs->inpring_base, jrp->inpbusaddr);
> +	wr_reg64(&jrp->rregs->outring_base, jrp->outbusaddr);
>  	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
>  	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
> 
> @@ -518,6 +634,9 @@ static struct platform_driver caam_jr_driver = {
>  		.name = "caam_jr",
>  		.owner = THIS_MODULE,
>  		.of_match_table = caam_jr_match,
> +#ifdef CONFIG_PM
> +		.pm = &jr_pm_ops,
> +#endif
>  	},
>  	.probe       = caam_jr_probe,
>  	.remove      = caam_jr_remove,
> --
> 1.8.1.2
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] crypto: caam - power management support for caam job-ring
  2014-03-20 18:50 [PATCH v3] crypto: caam - power management support for caam job-ring Yashpal Dutta
  2014-03-21  5:54 ` Ruchika Gupta
@ 2014-03-22 16:24 ` Ben Hutchings
  2014-03-22 17:35   ` yashpal.dutta
  2014-03-24 12:33   ` Horia Geantă
  1 sibling, 2 replies; 5+ messages in thread
From: Ben Hutchings @ 2014-03-22 16:24 UTC (permalink / raw)
  To: Yashpal Dutta; +Cc: linux-crypto, horia.geanta, vakul, ruchika.gupta, stable

[-- Attachment #1: Type: text/plain, Size: 761 bytes --]

On Fri, 2014-03-21 at 00:35 +0545, Yashpal Dutta wrote:
> Job ring is suspended gracefully and resume afresh.
> 
> Both Sleep (where device will remain powered-on) and Deep-sleep (where
> device will be powered-down are handled gracefully. Persistance sessions
> are not supported across deep-sleep.
> 
> Cc: stable@vger.kernel.org
> Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
> ---
>  drivers/crypto/caam/intern.h |   2 +
>  drivers/crypto/caam/jr.c     | 257 +++++++++++++++++++++++++++++++------------
>  2 files changed, 190 insertions(+), 69 deletions(-)
[...]

This is too big for stable; is a simpler fix possible?

Ben.

-- 
Ben Hutchings
I'm not a reverse psychological virus.  Please don't copy me into your sig.

[-- Attachment #2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 811 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH v3] crypto: caam - power management support for caam job-ring
  2014-03-22 16:24 ` Ben Hutchings
@ 2014-03-22 17:35   ` yashpal.dutta
  2014-03-24 12:33   ` Horia Geantă
  1 sibling, 0 replies; 5+ messages in thread
From: yashpal.dutta @ 2014-03-22 17:35 UTC (permalink / raw)
  To: Ben Hutchings
  Cc: linux-crypto, Horia.Geanta, Vakul Garg, Ruchika Gupta, stable

I tried to reduce the patch to smallest possible size which resulted in current patch. 

I didn’t find a way to compress or break it further to limit of stable tree patch expectations.

Yash
-----Original Message-----
From: Ben Hutchings [mailto:ben@decadent.org.uk] 
Sent: Saturday, March 22, 2014 9:54 PM
To: Dutta Yashpal-B05456
Cc: linux-crypto@vger.kernel.org; Geanta Neag Horia Ioan-B05471; Garg Vakul-B16394; Gupta Ruchika-R66431; stable@vger.kernel.org
Subject: Re: [PATCH v3] crypto: caam - power management support for caam job-ring

On Fri, 2014-03-21 at 00:35 +0545, Yashpal Dutta wrote:
> Job ring is suspended gracefully and resume afresh.
> 
> Both Sleep (where device will remain powered-on) and Deep-sleep (where 
> device will be powered-down are handled gracefully. Persistance 
> sessions are not supported across deep-sleep.
> 
> Cc: stable@vger.kernel.org
> Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
> ---
>  drivers/crypto/caam/intern.h |   2 +
>  drivers/crypto/caam/jr.c     | 257 +++++++++++++++++++++++++++++++------------
>  2 files changed, 190 insertions(+), 69 deletions(-)
[...]

This is too big for stable; is a simpler fix possible?

Ben.

--
Ben Hutchings
I'm not a reverse psychological virus.  Please don't copy me into your sig.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v3] crypto: caam - power management support for caam job-ring
  2014-03-22 16:24 ` Ben Hutchings
  2014-03-22 17:35   ` yashpal.dutta
@ 2014-03-24 12:33   ` Horia Geantă
  1 sibling, 0 replies; 5+ messages in thread
From: Horia Geantă @ 2014-03-24 12:33 UTC (permalink / raw)
  To: Yashpal Dutta; +Cc: Ben Hutchings, linux-crypto, vakul, ruchika.gupta, stable

On 3/22/2014 6:24 PM, Ben Hutchings wrote:
> On Fri, 2014-03-21 at 00:35 +0545, Yashpal Dutta wrote:
>> Job ring is suspended gracefully and resume afresh.
>>
>> Both Sleep (where device will remain powered-on) and Deep-sleep (where
>> device will be powered-down are handled gracefully. Persistance sessions
>> are not supported across deep-sleep.
>>
>> Cc: stable@vger.kernel.org
>> Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
>> ---
>>   drivers/crypto/caam/intern.h |   2 +
>>   drivers/crypto/caam/jr.c     | 257 +++++++++++++++++++++++++++++++------------
>>   2 files changed, 190 insertions(+), 69 deletions(-)
> [...]
>
> This is too big for stable; is a simpler fix possible?
>

Besides size, I'd also question whether this is a bug fix or a feature.
 From Documentation/stable_kernel_rules.txt:
  - It must fix a problem that causes a build error (but not for things
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short,
something critical.

On top of that - does it apply cleanly && has it been tested with 
-stable kernels?

Regards,
Horia

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2014-03-24 12:33 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-03-20 18:50 [PATCH v3] crypto: caam - power management support for caam job-ring Yashpal Dutta
2014-03-21  5:54 ` Ruchika Gupta
2014-03-22 16:24 ` Ben Hutchings
2014-03-22 17:35   ` yashpal.dutta
2014-03-24 12:33   ` Horia Geantă

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.