[PATCH V1] mmc: mmc_test: Pass different sg lists for non-blocking requests
From: Veerabhadrarao Badiganti @ 2020-02-11  9:36 UTC
  To: ulf.hansson, adrian.hunter
  Cc: asutoshd, stummala, sayalil, cang, rampraka, linux-mmc,
	linux-kernel, linux-arm-msm, Veerabhadrarao Badiganti,
	Greg Kroah-Hartman, Allison Randal, Thomas Gleixner

Supply a separate sg list for each of the requests in the non-blocking
IO test cases, where two requests are issued at the same time.

Otherwise, the sg memory may get unmapped when one request completes
while the same memory is still being accessed by the controller for the
other request, which leads to IOMMU errors with the call stack below:

	__arm_lpae_unmap+0x2e0/0x478
	arm_lpae_unmap+0x54/0x70
	arm_smmu_unmap+0x64/0xa4
	__iommu_unmap+0xb8/0x1f0
	iommu_unmap_fast+0x38/0x48
	__iommu_dma_unmap+0x88/0x108
	iommu_dma_unmap_sg+0x90/0xa4
	sdhci_post_req+0x5c/0x78
	mmc_test_start_areq+0x10c/0x120 [mmc_test]
	mmc_test_area_io_seq+0x150/0x264 [mmc_test]
	mmc_test_rw_multiple+0x174/0x1c0 [mmc_test]
	mmc_test_rw_multiple_sg_len+0x44/0x6c [mmc_test]
	mmc_test_profile_sglen_wr_nonblock_perf+0x6c/0x94 [mmc_test]
	mtf_test_write+0x238/0x3cc [mmc_test]
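
For context, the non-blocking test keeps the next transfer's sg list
dma-mapped (via the host's ->pre_req() hook) while the previous request
is still in flight, and ->post_req() unmaps a request's sg list as soon
as that request completes, as the sdhci_post_req() frame above shows.
With a single shared sg list, unmapping the completed request's sg can
tear down the mapping the other, still-active request is using for DMA.
A minimal sketch of the fixed pattern is below; it is illustration only,
not the actual mmc_test code, and prepare_and_map(), start_request(),
wait_for_completion_of() and unmap() are hypothetical helpers standing
in for the mmc_test/pre_req/post_req machinery:

	/*
	 * Sketch only: the double-buffered sg list pattern this patch
	 * introduces.  prepare_and_map(), start_request(),
	 * wait_for_completion_of() and unmap() are hypothetical helpers.
	 */
	static int nonblock_sketch(struct scatterlist *sg,
				   struct scatterlist *sg_areq,
				   unsigned int sg_len, unsigned int dev_addr,
				   unsigned int blocks, int write, int count)
	{
		struct mmc_request *mrq, *prev_mrq = NULL;
		int i, ret = 0;

		for (i = 0; i < count; i++) {
			/* dma-map the next transfer on the sg list that the
			 * request still in flight does NOT own */
			mrq = prepare_and_map(sg, sg_len, dev_addr, blocks,
					      write);

			if (prev_mrq) {
				ret = wait_for_completion_of(prev_mrq);
				/* the completed request owns the other sg
				 * list, so unmapping it cannot invalidate the
				 * mapping that @mrq was just given */
				unmap(prev_mrq);
				if (ret)
					break;
			}

			start_request(mrq);
			prev_mrq = mrq;
			/* alternate the two sg lists between requests */
			swap(sg, sg_areq);
			dev_addr += blocks;
		}

		/* drain the last request */
		if (!ret && prev_mrq) {
			ret = wait_for_completion_of(prev_mrq);
			unmap(prev_mrq);
		}

		return ret;
	}

With only one sg list, prepare_and_map() for the next transfer and
unmap() for the completed one would operate on the same scatterlist, so
the unmap tears down exactly the mapping the next transfer relies on,
producing the IOMMU faults above.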

Signed-off-by: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
---
 drivers/mmc/core/mmc_test.c | 38 ++++++++++++++++++++++++++++++++------
 1 file changed, 32 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 492dd45..69bdf60 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -71,6 +71,7 @@ struct mmc_test_mem {
  * @sg_len: length of currently mapped scatterlist @sg
  * @mem: allocated memory
  * @sg: scatterlist
+ * @sg_areq: scatterlist for non blocking request
  */
 struct mmc_test_area {
 	unsigned long max_sz;
@@ -82,6 +83,7 @@ struct mmc_test_area {
 	unsigned int sg_len;
 	struct mmc_test_mem *mem;
 	struct scatterlist *sg;
+	struct scatterlist *sg_areq;
 };
 
 /**
@@ -836,7 +838,9 @@ static int mmc_test_start_areq(struct mmc_test_card *test,
 }
 
 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
-				      struct scatterlist *sg, unsigned sg_len,
+				      struct scatterlist *sg,
+				      struct scatterlist *sg_areq,
+				      unsigned int sg_len,
 				      unsigned dev_addr, unsigned blocks,
 				      unsigned blksz, int write, int count)
 {
@@ -867,6 +871,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
 			prev_mrq = &rq2->mrq;
 
 		swap(mrq, prev_mrq);
+		swap(sg, sg_areq);
 		dev_addr += blocks;
 	}
 
@@ -1396,7 +1401,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
  * Map sz bytes so that it can be transferred.
  */
 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
-			     int max_scatter, int min_sg_len)
+			     int max_scatter, int min_sg_len, bool nonblock)
 {
 	struct mmc_test_area *t = &test->area;
 	int err;
@@ -1411,6 +1416,20 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
 		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
 				      t->max_seg_sz, &t->sg_len, min_sg_len);
 	}
+
+	if (err || !nonblock)
+		goto err;
+
+	if (max_scatter) {
+		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
+						  t->max_segs, t->max_seg_sz,
+				       &t->sg_len);
+	} else {
+		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
+				      t->max_seg_sz, &t->sg_len, min_sg_len);
+	}
+
+err:
 	if (err)
 		pr_info("%s: Failed to map sg list\n",
 		       mmc_hostname(test->card->host));
@@ -1458,15 +1477,16 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
 			sz = max_tfr;
 	}
 
-	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
+	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
 	if (ret)
 		return ret;
 
 	if (timed)
 		ktime_get_ts64(&ts1);
 	if (nonblock)
-		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
-				 dev_addr, t->blocks, 512, write, count);
+		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_areq,
+				 t->sg_len, dev_addr, t->blocks, 512, write,
+				 count);
 	else
 		for (i = 0; i < count && ret == 0; i++) {
 			ret = mmc_test_area_transfer(test, dev_addr, write);
@@ -1584,6 +1604,12 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
 		goto out_free;
 	}
 
+	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
+	if (!t->sg_areq) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
 	t->dev_addr = mmc_test_capacity(test->card) / 2;
 	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
 
@@ -2468,7 +2494,7 @@ static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
 	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
 		return RESULT_UNSUP_HOST;
 
-	ret = mmc_test_area_map(test, sz, 0, 0);
+	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
 	if (ret)
 		return ret;
 
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc., is a member of Code Aurora Forum, a Linux Foundation Collaborative Project
