* Patch "dmaengine: mv_xor: bug fix for racing condition in descriptors cleanup" has been added to the 4.1-stable tree
@ 2015-07-31  0:20 gregkh
  0 siblings, 0 replies; only message in thread
From: gregkh @ 2015-07-31  0:20 UTC (permalink / raw)
  To: alior, gregkh, maxime.ripard, oferh, vinod.koul; +Cc: stable, stable-commits


This is a note to let you know that I've just added the patch titled

    dmaengine: mv_xor: bug fix for racing condition in descriptors cleanup

to the 4.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch
and it can be found in the queue-4.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.


>From 9136291f1dbc1d4d1cacd2840fb35f4f3ce16c46 Mon Sep 17 00:00:00 2001
From: Lior Amsalem <alior@marvell.com>
Date: Tue, 26 May 2015 15:07:32 +0200
Subject: dmaengine: mv_xor: bug fix for racing condition in descriptors cleanup

From: Lior Amsalem <alior@marvell.com>

commit 9136291f1dbc1d4d1cacd2840fb35f4f3ce16c46 upstream.

This patch fixes a bug in the XOR driver where the cleanup function can be
called and free descriptors that have never been processed by the engine
(which results in data errors).

The cleanup function will free descriptors based on the ownership bit in
the descriptors.
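
For illustration only (not part of the patch): below is a minimal,
user-space sketch of the cleanup policy this change introduces. The
struct, helper and constant names here are made up for the example;
they only mirror the idea of the XOR_DESC_SUCCESS-based cleanup in the
diff that follows, where only descriptors the engine has completed are
freed, and the chain is only restarted when it is safe to do so.

	#include <stdbool.h>
	#include <stdio.h>

	#define DESC_SUCCESS 0x40000000u	/* mirrors XOR_DESC_SUCCESS below */

	struct desc {
		unsigned int status;		/* written back by the engine */
		unsigned long phys;		/* address the engine sees */
		bool freed;
	};

	/* Free only descriptors the engine has finished; report whether the
	 * descriptor currently loaded in the engine was among them. */
	static bool cleanup_chain(struct desc *chain, int n,
				  unsigned long current_desc)
	{
		bool current_cleaned = false;
		int i;

		for (i = 0; i < n; i++) {
			struct desc *d = &chain[i];

			if (d->status & DESC_SUCCESS) {
				d->freed = true;	/* completed, safe to free */
				if (d->phys == current_desc) {
					current_cleaned = true;
					break;
				}
			} else if (d->phys == current_desc) {
				break;			/* current one still pending */
			}
		}
		return current_cleaned;
	}

	int main(void)
	{
		struct desc chain[3] = {
			{ DESC_SUCCESS, 0x100, false },
			{ DESC_SUCCESS, 0x200, false },
			{ 0,            0x300, false },	/* not yet processed */
		};

		/* The engine is on the third descriptor, which is unfinished:
		 * it must not be freed, and the chain must not be restarted. */
		bool current_cleaned = cleanup_chain(chain, 3, 0x300);

		printf("current cleaned: %s, third freed: %s\n",
		       current_cleaned ? "yes" : "no",
		       chain[2].freed ? "yes" : "no");
		return 0;
	}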

Fixes: ff7b04796d98 ("dmaengine: DMA engine driver for Marvell XOR engine")
Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Reviewed-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/dma/mv_xor.c |   74 ++++++++++++++++++++++++++++++++-------------------
 drivers/dma/mv_xor.h |    1 
 2 files changed, 48 insertions(+), 27 deletions(-)

--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -273,7 +273,8 @@ static void mv_xor_slot_cleanup(struct m
 	dma_cookie_t cookie = 0;
 	int busy = mv_chan_is_busy(mv_chan);
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
-	int seen_current = 0;
+	int current_cleaned = 0;
+	struct mv_xor_desc *hw_desc;
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -285,38 +286,57 @@ static void mv_xor_slot_cleanup(struct m
 
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 					chain_node) {
-		prefetch(_iter);
-		prefetch(&_iter->async_tx);
 
-		/* do not advance past the current descriptor loaded into the
-		 * hardware channel, subsequent descriptors are either in
-		 * process or have not been submitted
-		 */
-		if (seen_current)
-			break;
-
-		/* stop the search if we reach the current descriptor and the
-		 * channel is busy
-		 */
-		if (iter->async_tx.phys == current_desc) {
-			seen_current = 1;
-			if (busy)
+		/* clean finished descriptors */
+		hw_desc = iter->hw_desc;
+		if (hw_desc->status & XOR_DESC_SUCCESS) {
+			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
+								cookie);
+
+			/* done processing desc, clean slot */
+			mv_xor_clean_slot(iter, mv_chan);
+
+			/* break if we cleaned the current descriptor */
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 1;
 				break;
+			}
+		} else {
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 0;
+				break;
+			}
 		}
-
-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
-		if (mv_xor_clean_slot(iter, mv_chan))
-			break;
 	}
 
 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
-		struct mv_xor_desc_slot *chain_head;
-		chain_head = list_entry(mv_chan->chain.next,
-					struct mv_xor_desc_slot,
-					chain_node);
-
-		mv_xor_start_new_chain(mv_chan, chain_head);
+		if (current_cleaned) {
+			/*
+			 * current descriptor cleaned and removed, run
+			 * from list head
+			 */
+			iter = list_entry(mv_chan->chain.next,
+					  struct mv_xor_desc_slot,
+					  chain_node);
+			mv_xor_start_new_chain(mv_chan, iter);
+		} else {
+			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
+				/*
+				 * descriptors are still waiting after
+				 * current, trigger them
+				 */
+				iter = list_entry(iter->chain_node.next,
+						  struct mv_xor_desc_slot,
+						  chain_node);
+				mv_xor_start_new_chain(mv_chan, iter);
+			} else {
+				/*
+				 * some descriptors are still waiting
+				 * to be cleaned
+				 */
+				tasklet_schedule(&mv_chan->irq_tasklet);
+			}
+		}
 	}
 
 	if (cookie > 0)
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -31,6 +31,7 @@
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
 #define XOR_DESCRIPTOR_SWAP		BIT(14)
+#define XOR_DESC_SUCCESS		0x40000000
 
 #define XOR_DESC_DMA_OWNED		BIT(31)
 #define XOR_DESC_EOD_INT_EN		BIT(31)


Patches currently in stable-queue which might be from alior@marvell.com are

queue-4.1/dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch
