All of lore.kernel.org
* [PATCH] mmc: sh_mmcif: process requests asynchronously
@ 2011-11-30 15:07 ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-11-30 15:07 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This patch converts the sh_mmcif MMC host driver to process requests 
asynchronously instead of waiting in its .request() method for completion. 
This is achieved by using threaded IRQs.
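
For reference, the generic threaded-IRQ pattern this conversion relies on 
looks roughly like the sketch below. It is an illustration only, not the 
sh_mmcif code itself; all foo_* names and registers are made up:

	/* hard IRQ handler: must not sleep, only collect and ack the status */
	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;

		host->status = readl(host->base + FOO_INT_STATUS);
		writel(~host->status, host->base + FOO_INT_STATUS);

		/* defer everything that may sleep to the IRQ thread */
		return IRQ_WAKE_THREAD;
	}

	/* threaded handler: runs in process context and may sleep */
	static irqreturn_t foo_thread(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;

		foo_process_next_stage(host);	/* e.g. PIO data, finish request */
		return IRQ_HANDLED;
	}

	/* in probe(): register both halves for the same interrupt */
	ret = request_threaded_irq(irq, foo_hardirq, foo_thread, 0,
				   "foo", host);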

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 824fee5..d077da4 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -123,6 +124,11 @@
 #define MASK_MRBSYTO		(1 << 1)
 #define MASK_MRSPTO		(1 << 0)
 
+#define START_CMD_MASK		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
 /* CE_HOST_STS1 */
 #define STS1_CMDSEQ		(1 << 31)
 
@@ -162,19 +169,38 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct clk *hclk;
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
+	spinlock_t lock;		/* protect host->state */
 	enum mmcif_state state;
-	spinlock_t lock;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -409,7 +435,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 {
 	u32 state1, state2;
-	int ret, timeout = 10000000;
+	int ret, timeout;
 
 	host->sd_error = false;
 
@@ -421,155 +447,97 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	if (state1 & STS1_CMDSEQ) {
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
-		while (1) {
-			timeout--;
-			if (timeout < 0) {
-				dev_err(&host->pd->dev,
-					"Forceed end of command sequence timeout err\n");
-				return -EIO;
-			}
+		for (timeout = 10000000; timeout; timeout--) {
 			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
-								& STS1_CMDSEQ))
+			      & STS1_CMDSEQ))
 				break;
 			mdelay(1);
 		}
+		if (!timeout) {
+			dev_err(&host->pd->dev,
+				"Forceed end of command sequence timeout err\n");
+			return -EIO;
+		}
 		sh_mmcif_sync_reset(host);
 		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
 		return -EIO;
 	}
 
 	if (state2 & STS2_CRC_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened CRC error\n");
+		dev_dbg(&host->pd->dev, ": CRC error\n");
 		ret = -EIO;
 	} else if (state2 & STS2_TIMEOUT_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
+		dev_dbg(&host->pd->dev, ": Timeout\n");
 		ret = -ETIMEDOUT;
 	} else {
-		dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
+		dev_dbg(&host->pd->dev, ": End/Index error\n");
 		ret = -EIO;
 	}
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
-		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
-
-	/* buffer read end */
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
-	}
-	return 0;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 					struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
-		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
-
-	/* buffer write end */
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
-
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-						struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
-	}
-	return 0;
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -666,57 +634,48 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
-				struct mmc_request *mrq, u32 opc)
+			       struct mmc_request *mrq, u32 opc)
 {
-	int ret;
-
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_read(host, mrq);
-		break;
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_write(host, mrq);
-		break;
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		ret = sh_mmcif_single_write(host, mrq);
-		break;
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		ret = sh_mmcif_single_read(host, mrq);
-		break;
-	default:
-		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
-		ret = -EINVAL;
-		break;
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	}
-	return ret;
+	dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
+	return -EINVAL;
 }
 
 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
-			struct mmc_request *mrq, struct mmc_command *cmd)
+			       struct mmc_request *mrq)
 {
-	long time;
-	int ret = 0, mask = 0;
+	struct mmc_command *cmd = mrq->cmd;
 	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
-	/* respons busy check */
+	/* response busy check */
 	case MMC_SWITCH:
 	case MMC_STOP_TRANSMISSION:
 	case MMC_SET_WRITE_PROT:
 	case MMC_CLR_WRITE_PROT:
 	case MMC_ERASE:
 	case MMC_GEN_CMD:
-		mask = MASK_MRBSYE;
+		mask = START_CMD_MASK | MASK_MRBSYE;
 		break;
 	default:
-		mask = MASK_MCRSPE;
+		mask = START_CMD_MASK | MASK_MCRSPE;
 		break;
 	}
-	mask |=	MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
-		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
-		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
-		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
 
 	if (host->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
@@ -732,61 +691,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-					cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-									  host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
-		struct mmc_request *mrq, struct mmc_command *cmd)
+			      struct mmc_request *mrq)
 {
-	long time;
+	struct mmc_command *cmd = mrq->stop;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -798,14 +710,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -813,6 +719,8 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	struct sh_mmcif_host *host = mmc_priv(mmc);
 	unsigned long flags;
 
+	dev_info(mmc->parent, "%s(%u), data %p\n", __func__, mrq->cmd->opcode, mrq->data);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (host->state != STATE_IDLE) {
 		spin_unlock_irqrestore(&host->lock, flags);
@@ -844,23 +752,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -935,9 +831,268 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
-static void sh_mmcif_detect(struct mmc_host *mmc)
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	mmc_detect_change(mmc, 0);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
+}
+
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	/* buffer read end */
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
+
+	return true;
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	/* buffer write end */
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
+
+	return true;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true, if processing continues, and false, if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
@@ -991,14 +1146,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (host->dma_active)
+			mmcif_dma_complete(host);
+		else
+			return IRQ_WAKE_THREAD;
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
@@ -1052,7 +1251,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1089,19 +1287,21 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
-	sh_mmcif_detect(host->mmc);
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
+	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
 	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
@@ -1127,11 +1327,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 

* Re: [PATCH] mmc: sh_mmcif: process requests asynchronously
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-02 15:24   ` Magnus Damm
  -1 siblings, 0 replies; 30+ messages in thread
From: Magnus Damm @ 2011-12-02 15:24 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Chris Ball

Hi Guennadi,

On Thu, Dec 1, 2011 at 12:07 AM, Guennadi Liakhovetski
<g.liakhovetski@gmx.de> wrote:
> This patch converts the sh_mmcif MMC host driver to process requests
> asynchronously instead of waiting in its .request() method for completion.
> This is achieved by using threaded IRQs.
>
> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

Thanks for your work on this! Interesting with threaded IRQ support.

Would it be possible to split this up into smaller chunks somehow?
This rather big patch is more or less a rewrite of the driver - so it
somehow looks to me like you're implementing multiple functional
changes in a single commit. Maybe you're not doing that, but if so
then at least the new code flow needs some documentation.

How about including some ascii-art style charts of the different
states involved in reads and writes both for PIO and DMA? I'm thinking
of something similar to the comments that describe the bus transaction
and interrupts in drivers/i2c/busses/i2c-sh_mobile.c.

Cheers,

/ magnus

* Re: [PATCH] mmc: sh_mmcif: process requests asynchronously
  2011-12-02 15:24   ` Magnus Damm
@ 2011-12-02 15:54     ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-02 15:54 UTC (permalink / raw)
  To: Magnus Damm; +Cc: linux-mmc, linux-sh, Chris Ball

Hi Magnus

On Sat, 3 Dec 2011, Magnus Damm wrote:

> Hi Guennadi,
> 
> On Thu, Dec 1, 2011 at 12:07 AM, Guennadi Liakhovetski
> <g.liakhovetski@gmx.de> wrote:
> > This patch converts the sh_mmcif MMC host driver to process requests
> > asynchronously instead of waiting in its .request() method for completion.
> > This is achieved by using threaded IRQs.
> >
> > Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> 
> Thanks for your work on this! Interesting with threaded IRQ support.
> 
> Would it be possible to split this up into smaller chunks somehow?
> This rather big patch is more or less a rewrite of the driver - so it
> somehow looks to me like you're implementing multiple functional
> changes in a single commit. Maybe you're not doing that, but if so
> then at least the new code flow needs some documentation.

Yes, the diff is, unfortunately, pretty large. But so far I don't see a 
good way to split it. Essentially this patch introduces two changes:

1. asynchronous request processing
2. threaded IRQs

but I don't think splitting these changes would make sense. You don't want 
to implement (1) by any means other than (2), because that would be double 
work. And once you do (2), you practically arrive at (1) anyway.

I'm not rewriting that much of the driver either - most low-level functions 
are preserved, but many of them have to be rearranged. E.g., the functions 
that process specific requests - single-block read / write, multi-block 
r/w, etc. - are each split in two: the part before going to sleep, and the 
part after the hardware interrupt, which is now called from the IRQ thread 
(roughly as sketched below). But the diff cannot show that as moved code, 
so it looks huge.
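
Just to illustrate the shape of that split, here is a rough sketch with 
made-up example_* names (this is not the real sh_mmcif code, and the 
helpers are assumed, not actual driver functions):

	struct example_host {			/* hypothetical, not sh_mmcif_host */
		struct completion done;
		enum { WAIT_FOR_NONE, WAIT_FOR_DATA } wait_for;
		/* ... */
	};

	/* before: one blocking helper, called directly from .request() */
	static int example_xfer_block(struct example_host *h)
	{
		example_enable_irq(h);			/* unmask the data IRQ */
		wait_for_completion(&h->done);		/* sleep until it fires */
		example_copy_block(h);			/* PIO the data */
		return example_check_errors(h);
	}

	/* after: a non-blocking "start" half, still called from .request()... */
	static void example_xfer_block_start(struct example_host *h)
	{
		h->wait_for = WAIT_FOR_DATA;		/* remember what we wait for */
		example_enable_irq(h);			/* unmask and return at once */
	}

	/* ...and a "finish" half, called later from the threaded IRQ handler */
	static bool example_xfer_block_done(struct example_host *h)
	{
		example_copy_block(h);
		return !example_check_errors(h);	/* true: request continues */
	}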

But I'll spend some more time thinking about possibilities to split or 
reduce the diff - maybe I'll get an idea...

> How about including some ascii-art style charts of the different
> states involved in reads and writes both for PIO and DMA? I'm thinking
> of something similar to the comments that describe the bus transaction
> and interrupts in drivers/i2c/busses/i2c-sh_mobile.c.

Ok, I'll try to add some documentation to the next version of the patch.

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

* [PATCH 0/4 v2] mmc: sh_mmcif: process requests asynchronously
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This used to be a single patch, which has now been split in two; in addition, 
two more not directly related patches for the same driver have been included. 
Patch 1/4 is actually a fix and could be considered for 3.2, but I personally 
haven't heard of any bug reports that could be attributed to it, so it doesn't 
seem to be very critical. Also, on request from Magnus, a state machine 
description has been added to the file header.

Guennadi Liakhovetski (4):
  mmc: sh_mmcif: process error interrupts first
  mmc: sh_mmcif: cosmetic clean up
  mmc: sh_mmcif: process requests asynchronously
  mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member

 drivers/mmc/host/sh_mmcif.c |  709 +++++++++++++++++++++++++++++--------------
 1 files changed, 474 insertions(+), 235 deletions(-)

-- 
1.7.2.5

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH 0/4 v2] mmc_ sh_mmcif: process requests asynchronously
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This used to be a single patch, which has now been split in two; in addition, 
two more not directly related patches for the same driver have been included. 
Patch 1/4 is actually a fix and could be considered for 3.2, but I personally 
haven't heard of any bug reports that could be attributed to the problem it 
fixes, so it doesn't seem to be very critical. Also, on request from Magnus, 
a state machine description has been added to the file header.

Guennadi Liakhovetski (4):
  mmc: sh_mmcif: process error interrupts first
  mmc: sh_mmcif: cosmetic clean up
  mmc: sh_mmcif: process requests asynchronously
  mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member

 drivers/mmc/host/sh_mmcif.c |  709 +++++++++++++++++++++++++++++--------------
 1 files changed, 474 insertions(+), 235 deletions(-)

-- 
1.7.2.5

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH 1/4] mmc: sh_mmcif: process error interrupts first
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

If an interrupt arrives with both error and data completion status bits set,
it has to be handled as an error interrupt, which means the error bits have
to be checked first. The current version of the driver, on the contrary,
doesn't recognise such interrupts as error events, which leads to data
corruption and breaks the error recovery.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   12 ++++++------
 1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 824fee5..63559d6 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -948,7 +948,12 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 
 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
 
-	if (state & INT_RBSYE) {
+	if (state & INT_ERR_STS) {
+		/* error interrupts - process first */
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+		err = 1;
+	} else if (state & INT_RBSYE) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
 				~(INT_RBSYE | INT_CRSPE));
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
@@ -976,11 +981,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
 				~(INT_CMD12RBE | INT_CMD12CRE));
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
-	} else if (state & INT_ERR_STS) {
-		/* err interrupts */
-		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
-		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
-		err = 1;
 	} else {
 		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 1/4] mmc: sh_mmcif: process error interrupts first
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

If an interrupt arrives with both error and data completion status bits set,
it has to be handled as an error interrupt, which means the error bits have
to be checked first. The current version of the driver, on the contrary,
doesn't recognise such interrupts as error events, which leads to data
corruption and breaks the error recovery.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   12 ++++++------
 1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 824fee5..63559d6 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -948,7 +948,12 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 
 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
 
-	if (state & INT_RBSYE) {
+	if (state & INT_ERR_STS) {
+		/* error interrupts - process first */
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+		err = 1;
+	} else if (state & INT_RBSYE) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
 				~(INT_RBSYE | INT_CRSPE));
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
@@ -976,11 +981,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
 				~(INT_CMD12RBE | INT_CMD12CRE));
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
-	} else if (state & INT_ERR_STS) {
-		/* err interrupts */
-		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
-		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
-		err = 1;
 	} else {
 		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 2/4 v2] mmc: sh_mmcif: cosmetic clean up
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This patch doesn't introduce any functional changes; it only simplifies
some code fragments, removes superfluous parameters, and fixes typos.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   79 ++++++++++++++++++------------------------
 1 files changed, 34 insertions(+), 45 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 63559d6..d6a534c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -123,6 +123,11 @@
 #define MASK_MRBSYTO		(1 << 1)
 #define MASK_MRSPTO		(1 << 0)
 
+#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
 /* CE_HOST_STS1 */
 #define STS1_CMDSEQ		(1 << 31)
 
@@ -173,8 +178,8 @@ struct sh_mmcif_host {
 	long timeout;
 	void __iomem *addr;
 	struct completion intr_wait;
+	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
-	spinlock_t lock;
 	bool power;
 	bool card_present;
 
@@ -409,7 +414,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 {
 	u32 state1, state2;
-	int ret, timeout = 10000000;
+	int ret, timeout;
 
 	host->sd_error = false;
 
@@ -421,31 +426,30 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	if (state1 & STS1_CMDSEQ) {
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
-		while (1) {
-			timeout--;
-			if (timeout < 0) {
-				dev_err(&host->pd->dev,
-					"Forceed end of command sequence timeout err\n");
-				return -EIO;
-			}
+		for (timeout = 10000000; timeout; timeout--) {
 			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
-								& STS1_CMDSEQ))
+			      & STS1_CMDSEQ))
 				break;
 			mdelay(1);
 		}
+		if (!timeout) {
+			dev_err(&host->pd->dev,
+				"Forced end of command sequence timeout err\n");
+			return -EIO;
+		}
 		sh_mmcif_sync_reset(host);
 		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
 		return -EIO;
 	}
 
 	if (state2 & STS2_CRC_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened CRC error\n");
+		dev_dbg(&host->pd->dev, ": CRC error\n");
 		ret = -EIO;
 	} else if (state2 & STS2_TIMEOUT_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
+		dev_dbg(&host->pd->dev, ": Timeout\n");
 		ret = -ETIMEDOUT;
 	} else {
-		dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
+		dev_dbg(&host->pd->dev, ": End/Index error\n");
 		ret = -EIO;
 	}
 	return ret;
@@ -668,55 +672,44 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 				struct mmc_request *mrq, u32 opc)
 {
-	int ret;
-
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_read(host, mrq);
-		break;
+		return sh_mmcif_multi_read(host, mrq);
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_write(host, mrq);
-		break;
+		return sh_mmcif_multi_write(host, mrq);
 	case MMC_WRITE_BLOCK:
-		ret = sh_mmcif_single_write(host, mrq);
-		break;
+		return sh_mmcif_single_write(host, mrq);
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		ret = sh_mmcif_single_read(host, mrq);
-		break;
+		return sh_mmcif_single_read(host, mrq);
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-	return ret;
 }
 
 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
-			struct mmc_request *mrq, struct mmc_command *cmd)
+			       struct mmc_request *mrq)
 {
+	struct mmc_command *cmd = mrq->cmd;
 	long time;
-	int ret = 0, mask = 0;
-	u32 opc = cmd->opcode;
+	int ret = 0;
+	u32 mask, opc = cmd->opcode;
 
 	switch (opc) {
-	/* respons busy check */
+	/* response busy check */
 	case MMC_SWITCH:
 	case MMC_STOP_TRANSMISSION:
 	case MMC_SET_WRITE_PROT:
 	case MMC_CLR_WRITE_PROT:
 	case MMC_ERASE:
 	case MMC_GEN_CMD:
-		mask = MASK_MRBSYE;
+		mask = MASK_START_CMD | MASK_MRBSYE;
 		break;
 	default:
-		mask = MASK_MCRSPE;
+		mask = MASK_START_CMD | MASK_MCRSPE;
 		break;
 	}
-	mask |=	MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
-		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
-		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
-		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
 
 	if (host->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
@@ -784,8 +777,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
-		struct mmc_request *mrq, struct mmc_command *cmd)
+			      struct mmc_request *mrq)
 {
+	struct mmc_command *cmd = mrq->stop;
 	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
@@ -854,11 +848,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 				sh_mmcif_start_dma_tx(host);
 		}
 	}
-	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
+	sh_mmcif_start_cmd(host, mrq);
 	host->data = NULL;
 
 	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+		sh_mmcif_stop_cmd(host, mrq);
 	host->state = STATE_IDLE;
 	mmc_request_done(mmc, mrq);
 }
@@ -935,11 +929,6 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
-static void sh_mmcif_detect(struct mmc_host *mmc)
-{
-	mmc_detect_change(mmc, 0);
-}
-
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -1101,7 +1090,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 		goto clean_up3;
 	}
 
-	sh_mmcif_detect(host->mmc);
+	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
 	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 2/4 v2] mmc: sh_mmcif: cosmetic clean up
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This patch doesn't introduce any functional changes; it only simplifies
some code fragments, removes superfluous parameters, and fixes typos.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   79 ++++++++++++++++++------------------------
 1 files changed, 34 insertions(+), 45 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 63559d6..d6a534c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -123,6 +123,11 @@
 #define MASK_MRBSYTO		(1 << 1)
 #define MASK_MRSPTO		(1 << 0)
 
+#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+				 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
 /* CE_HOST_STS1 */
 #define STS1_CMDSEQ		(1 << 31)
 
@@ -173,8 +178,8 @@ struct sh_mmcif_host {
 	long timeout;
 	void __iomem *addr;
 	struct completion intr_wait;
+	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
-	spinlock_t lock;
 	bool power;
 	bool card_present;
 
@@ -409,7 +414,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 {
 	u32 state1, state2;
-	int ret, timeout = 10000000;
+	int ret, timeout;
 
 	host->sd_error = false;
 
@@ -421,31 +426,30 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	if (state1 & STS1_CMDSEQ) {
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
-		while (1) {
-			timeout--;
-			if (timeout < 0) {
-				dev_err(&host->pd->dev,
-					"Forceed end of command sequence timeout err\n");
-				return -EIO;
-			}
+		for (timeout = 10000000; timeout; timeout--) {
 			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
-								& STS1_CMDSEQ))
+			      & STS1_CMDSEQ))
 				break;
 			mdelay(1);
 		}
+		if (!timeout) {
+			dev_err(&host->pd->dev,
+				"Forced end of command sequence timeout err\n");
+			return -EIO;
+		}
 		sh_mmcif_sync_reset(host);
 		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
 		return -EIO;
 	}
 
 	if (state2 & STS2_CRC_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened CRC error\n");
+		dev_dbg(&host->pd->dev, ": CRC error\n");
 		ret = -EIO;
 	} else if (state2 & STS2_TIMEOUT_ERR) {
-		dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
+		dev_dbg(&host->pd->dev, ": Timeout\n");
 		ret = -ETIMEDOUT;
 	} else {
-		dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
+		dev_dbg(&host->pd->dev, ": End/Index error\n");
 		ret = -EIO;
 	}
 	return ret;
@@ -668,55 +672,44 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 				struct mmc_request *mrq, u32 opc)
 {
-	int ret;
-
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_read(host, mrq);
-		break;
+		return sh_mmcif_multi_read(host, mrq);
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		ret = sh_mmcif_multi_write(host, mrq);
-		break;
+		return sh_mmcif_multi_write(host, mrq);
 	case MMC_WRITE_BLOCK:
-		ret = sh_mmcif_single_write(host, mrq);
-		break;
+		return sh_mmcif_single_write(host, mrq);
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		ret = sh_mmcif_single_read(host, mrq);
-		break;
+		return sh_mmcif_single_read(host, mrq);
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-	return ret;
 }
 
 static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
-			struct mmc_request *mrq, struct mmc_command *cmd)
+			       struct mmc_request *mrq)
 {
+	struct mmc_command *cmd = mrq->cmd;
 	long time;
-	int ret = 0, mask = 0;
-	u32 opc = cmd->opcode;
+	int ret = 0;
+	u32 mask, opc = cmd->opcode;
 
 	switch (opc) {
-	/* respons busy check */
+	/* response busy check */
 	case MMC_SWITCH:
 	case MMC_STOP_TRANSMISSION:
 	case MMC_SET_WRITE_PROT:
 	case MMC_CLR_WRITE_PROT:
 	case MMC_ERASE:
 	case MMC_GEN_CMD:
-		mask = MASK_MRBSYE;
+		mask = MASK_START_CMD | MASK_MRBSYE;
 		break;
 	default:
-		mask = MASK_MCRSPE;
+		mask = MASK_START_CMD | MASK_MCRSPE;
 		break;
 	}
-	mask |=	MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
-		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
-		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
-		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
 
 	if (host->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
@@ -784,8 +777,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
-		struct mmc_request *mrq, struct mmc_command *cmd)
+			      struct mmc_request *mrq)
 {
+	struct mmc_command *cmd = mrq->stop;
 	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
@@ -854,11 +848,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 				sh_mmcif_start_dma_tx(host);
 		}
 	}
-	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
+	sh_mmcif_start_cmd(host, mrq);
 	host->data = NULL;
 
 	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+		sh_mmcif_stop_cmd(host, mrq);
 	host->state = STATE_IDLE;
 	mmc_request_done(mmc, mrq);
 }
@@ -935,11 +929,6 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
-static void sh_mmcif_detect(struct mmc_host *mmc)
-{
-	mmc_detect_change(mmc, 0);
-}
-
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -1101,7 +1090,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 		goto clean_up3;
 	}
 
-	sh_mmcif_detect(host->mmc);
+	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
 	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 3/4 v2] mmc: sh_mmcif: process requests asynchronously
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This patch converts the sh_mmcif MMC host driver to process requests
asynchronously instead of waiting in its .request() method for completion.
This is achieved by using threaded IRQs.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |  588 ++++++++++++++++++++++++++++++-------------
 1 files changed, 416 insertions(+), 172 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d6a534c..ec6805c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,32 @@
  *
  */
 
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
@@ -167,19 +193,38 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct clk *hclk;
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
 	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -455,125 +500,183 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
+{
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
 
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 					struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
 
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-						struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -670,18 +773,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
-				struct mmc_request *mrq, u32 opc)
+			       struct mmc_request *mrq, u32 opc)
 {
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_read(host, mrq);
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_write(host, mrq);
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		return sh_mmcif_single_write(host, mrq);
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		return sh_mmcif_single_read(host, mrq);
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		return -EINVAL;
@@ -692,9 +799,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
-	long time;
-	int ret = 0;
-	u32 mask, opc = cmd->opcode;
+	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
 	/* response busy check */
@@ -725,62 +831,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-					cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-									  host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->stop;
-	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -792,14 +850,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -838,23 +890,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -929,6 +969,157 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		dev_err(host->mmc->parent,
+			"Error IRQ while waiting for DMA completion!\n");
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true, if processing continues, and false, if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -980,14 +1171,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (!host->dma_active)
+			return IRQ_WAKE_THREAD;
+		else if (host->sd_error)
+			mmcif_dma_complete(host);
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
@@ -1041,7 +1276,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1078,18 +1312,20 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
 	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1116,11 +1352,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 3/4 v2] mmc: sh_mmcif: process requests asynchronously
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

This patch converts the sh_mmcif MMC host driver to process requests
asynchronously instead of waiting in its .request() method for completion.
This is achieved by using threaded IRQs.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |  588 ++++++++++++++++++++++++++++++-------------
 1 files changed, 416 insertions(+), 172 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d6a534c..ec6805c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,32 @@
  *
  */
 
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
@@ -167,19 +193,38 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct clk *hclk;
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
 	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -455,125 +500,183 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
+{
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
 
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 					struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
 
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-						struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -670,18 +773,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
-				struct mmc_request *mrq, u32 opc)
+			       struct mmc_request *mrq, u32 opc)
 {
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_read(host, mrq);
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_write(host, mrq);
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		return sh_mmcif_single_write(host, mrq);
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		return sh_mmcif_single_read(host, mrq);
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		return -EINVAL;
@@ -692,9 +799,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
-	long time;
-	int ret = 0;
-	u32 mask, opc = cmd->opcode;
+	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
 	/* response busy check */
@@ -725,62 +831,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-					cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-									  host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->stop;
-	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -792,14 +850,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -838,23 +890,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -929,6 +969,157 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		dev_err(host->mmc->parent,
+			"Error IRQ while waiting for DMA completion!\n");
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true, if processing continues, and false, if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -980,14 +1171,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (!host->dma_active)
+			return IRQ_WAKE_THREAD;
+		else if (host->sd_error)
+			mmcif_dma_complete(host);
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
@@ -1041,7 +1276,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1078,18 +1312,20 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
 	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1116,11 +1352,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 4/4] mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member
  2011-11-30 15:07 ` Guennadi Liakhovetski
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   94 +++++++++++++++++++++++--------------------
 1 files changed, 50 insertions(+), 44 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ec6805c..29bcb75 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -207,7 +207,6 @@ enum mmcif_wait_for {
 
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
-	struct mmc_data *data;
 	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct clk *hclk;
@@ -250,19 +249,21 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
 static void mmcif_dma_complete(void *arg)
 {
 	struct sh_mmcif_host *host = arg;
+	struct mmc_data *data = host->mrq->data;
+
 	dev_dbg(&host->pd->dev, "Command completed\n");
 
-	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+	if (WARN(!data, "%s: NULL data in DMA completion!\n",
 		 dev_name(&host->pd->dev)))
 		return;
 
-	if (host->data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ)
 		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_FROM_DEVICE);
 	else
 		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_TO_DEVICE);
 
 	complete(&host->dma_complete);
@@ -270,13 +271,14 @@ static void mmcif_dma_complete(void *arg)
 
 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_FROM_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -292,7 +294,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -313,18 +315,19 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	}
 
 	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, cookie, host->data->sg_len);
+		desc, cookie, data->sg_len);
 }
 
 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_TO_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -340,7 +343,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -698,8 +701,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
 }
 
 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
-		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+			    struct mmc_request *mrq)
 {
+	struct mmc_data *data = mrq->data;
+	struct mmc_command *cmd = mrq->cmd;
+	u32 opc = cmd->opcode;
 	u32 tmp = 0;
 
 	/* Response Type check */
@@ -731,7 +737,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 	/* WDAT / DATW */
-	if (host->data) {
+	if (data) {
 		tmp |= CMD_SET_WDAT;
 		switch (host->bus_width) {
 		case MMC_BUS_WIDTH_1:
@@ -755,7 +761,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
 		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
 		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
-					mrq->data->blocks << 16);
+				data->blocks << 16);
 	}
 	/* RIDXC[1:0] check bits */
 	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
@@ -769,7 +775,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
 		tmp |= CMD_SET_CRC7C_INTERNAL;
 
-	return opc = ((opc << 24) | tmp);
+	return (opc << 24) | tmp;
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
@@ -817,12 +823,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 
-	if (host->data) {
+	if (mrq->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
 				mrq->data->blksz);
 	}
-	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+	opc = sh_mmcif_set_cmd(host, mrq);
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
@@ -838,15 +844,16 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
-	struct mmc_command *cmd = mrq->stop;
-
-	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+	switch (mrq->cmd->opcode) {
+	case MMC_READ_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
-	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+		break;
+	case MMC_WRITE_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
-	else {
+		break;
+	default:
 		dev_err(&host->pd->dev, "unsupported stop cmd\n");
-		cmd->error = sh_mmcif_error_manage(host);
+		mrq->stop->error = sh_mmcif_error_manage(host);
 		return;
 	}
 
@@ -892,7 +899,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	host->mrq = mrq;
-	host->data = mrq->data;
 
 	sh_mmcif_start_cmd(host, mrq);
 }
@@ -972,6 +978,7 @@ static struct mmc_host_ops sh_mmcif_ops = {
 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 {
 	struct mmc_command *cmd = host->mrq->cmd;
+	struct mmc_data *data = host->mrq->data;
 	long time;
 
 	if (host->sd_error) {
@@ -997,10 +1004,10 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 
 	sh_mmcif_get_response(host, cmd);
 
-	if (!host->data)
+	if (!data)
 		return false;
 
-	if (host->mrq->data->flags & MMC_DATA_READ) {
+	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
 			sh_mmcif_start_dma_rx(host);
 	} else {
@@ -1009,8 +1016,8 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 	}
 
 	if (!host->dma_active) {
-		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
-		if (!host->data->error)
+		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!data->error)
 			return true;
 		return false;
 	}
@@ -1022,22 +1029,22 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 		dev_err(host->mmc->parent,
 			"Error IRQ while waiting for DMA completion!\n");
 		/* Woken up by an error IRQ: abort DMA */
-		if (host->data->flags & MMC_DATA_READ)
+		if (data->flags & MMC_DATA_READ)
 			dmaengine_terminate_all(host->chan_rx);
 		else
 			dmaengine_terminate_all(host->chan_tx);
-		host->data->error = sh_mmcif_error_manage(host);
+		data->error = sh_mmcif_error_manage(host);
 	} else if (!time) {
-		host->data->error = -ETIMEDOUT;
+		data->error = -ETIMEDOUT;
 	} else if (time < 0) {
-		host->data->error = time;
+		data->error = time;
 	}
 	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
 			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
 	host->dma_active = false;
 
-	if (host->data->error)
-		host->data->bytes_xfered = 0;
+	if (data->error)
+		data->bytes_xfered = 0;
 
 	return false;
 }
@@ -1046,6 +1053,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
 	struct mmc_request *mrq = host->mrq;
+	struct mmc_data *data = mrq->data;
 
 	cancel_delayed_work_sync(&host->timeout_work);
 
@@ -1093,20 +1101,18 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
 		if (host->sd_error)
-			mrq->data->error = sh_mmcif_error_manage(host);
+			data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
 	}
 
 	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
-		host->data = NULL;
+		if (!mrq->cmd->error && data && !data->error)
+			data->bytes_xfered =
+				data->blocks * data->blksz;
 
-		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-
-		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
 			sh_mmcif_stop_cmd(host, mrq);
 			if (!mrq->stop->error)
 				return IRQ_HANDLED;
@@ -1115,6 +1121,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
 	host->state = STATE_IDLE;
+	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 
 	return IRQ_HANDLED;
@@ -1210,7 +1217,7 @@ static void mmcif_timeout_work(struct work_struct *work)
 	case MMCIF_WAIT_FOR_WRITE:
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
-		host->data->error = sh_mmcif_error_manage(host);
+		mrq->data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
@@ -1218,7 +1225,6 @@ static void mmcif_timeout_work(struct work_struct *work)
 
 	host->state = STATE_IDLE;
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
-	host->data = NULL;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 }
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 4/4] mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member
@ 2011-12-14 18:31   ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-14 18:31 UTC (permalink / raw)
  To: linux-mmc; +Cc: linux-sh, Chris Ball, Magnus Damm

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---
 drivers/mmc/host/sh_mmcif.c |   94 +++++++++++++++++++++++--------------------
 1 files changed, 50 insertions(+), 44 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ec6805c..29bcb75 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -207,7 +207,6 @@ enum mmcif_wait_for {
 
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
-	struct mmc_data *data;
 	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct clk *hclk;
@@ -250,19 +249,21 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
 static void mmcif_dma_complete(void *arg)
 {
 	struct sh_mmcif_host *host = arg;
+	struct mmc_data *data = host->mrq->data;
+
 	dev_dbg(&host->pd->dev, "Command completed\n");
 
-	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+	if (WARN(!data, "%s: NULL data in DMA completion!\n",
 		 dev_name(&host->pd->dev)))
 		return;
 
-	if (host->data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ)
 		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_FROM_DEVICE);
 	else
 		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->data->sg, host->data->sg_len,
+			     data->sg, data->sg_len,
 			     DMA_TO_DEVICE);
 
 	complete(&host->dma_complete);
@@ -270,13 +271,14 @@ static void mmcif_dma_complete(void *arg)
 
 static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_FROM_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -292,7 +294,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -313,18 +315,19 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	}
 
 	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, cookie, host->data->sg_len);
+		desc, cookie, data->sg_len);
 }
 
 static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 {
-	struct scatterlist *sg = host->data->sg;
+	struct mmc_data *data = host->mrq->data;
+	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
-	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
 			 DMA_TO_DEVICE);
 	if (ret > 0) {
 		host->dma_active = true;
@@ -340,7 +343,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 		dma_async_issue_pending(chan);
 	}
 	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
-		__func__, host->data->sg_len, ret, cookie);
+		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
 		/* DMA failed, fall back to PIO */
@@ -698,8 +701,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
 }
 
 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
-		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+			    struct mmc_request *mrq)
 {
+	struct mmc_data *data = mrq->data;
+	struct mmc_command *cmd = mrq->cmd;
+	u32 opc = cmd->opcode;
 	u32 tmp = 0;
 
 	/* Response Type check */
@@ -731,7 +737,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 	/* WDAT / DATW */
-	if (host->data) {
+	if (data) {
 		tmp |= CMD_SET_WDAT;
 		switch (host->bus_width) {
 		case MMC_BUS_WIDTH_1:
@@ -755,7 +761,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
 		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
 		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
-					mrq->data->blocks << 16);
+				data->blocks << 16);
 	}
 	/* RIDXC[1:0] check bits */
 	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
@@ -769,7 +775,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
 		tmp |= CMD_SET_CRC7C_INTERNAL;
 
-	return opc = ((opc << 24) | tmp);
+	return (opc << 24) | tmp;
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
@@ -817,12 +823,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 		break;
 	}
 
-	if (host->data) {
+	if (mrq->data) {
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
 		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
 				mrq->data->blksz);
 	}
-	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+	opc = sh_mmcif_set_cmd(host, mrq);
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
@@ -838,15 +844,16 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
-	struct mmc_command *cmd = mrq->stop;
-
-	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+	switch (mrq->cmd->opcode) {
+	case MMC_READ_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
-	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+		break;
+	case MMC_WRITE_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
-	else {
+		break;
+	default:
 		dev_err(&host->pd->dev, "unsupported stop cmd\n");
-		cmd->error = sh_mmcif_error_manage(host);
+		mrq->stop->error = sh_mmcif_error_manage(host);
 		return;
 	}
 
@@ -892,7 +899,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	host->mrq = mrq;
-	host->data = mrq->data;
 
 	sh_mmcif_start_cmd(host, mrq);
 }
@@ -972,6 +978,7 @@ static struct mmc_host_ops sh_mmcif_ops = {
 static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 {
 	struct mmc_command *cmd = host->mrq->cmd;
+	struct mmc_data *data = host->mrq->data;
 	long time;
 
 	if (host->sd_error) {
@@ -997,10 +1004,10 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 
 	sh_mmcif_get_response(host, cmd);
 
-	if (!host->data)
+	if (!data)
 		return false;
 
-	if (host->mrq->data->flags & MMC_DATA_READ) {
+	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
 			sh_mmcif_start_dma_rx(host);
 	} else {
@@ -1009,8 +1016,8 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 	}
 
 	if (!host->dma_active) {
-		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
-		if (!host->data->error)
+		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!data->error)
 			return true;
 		return false;
 	}
@@ -1022,22 +1029,22 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 		dev_err(host->mmc->parent,
 			"Error IRQ while waiting for DMA completion!\n");
 		/* Woken up by an error IRQ: abort DMA */
-		if (host->data->flags & MMC_DATA_READ)
+		if (data->flags & MMC_DATA_READ)
 			dmaengine_terminate_all(host->chan_rx);
 		else
 			dmaengine_terminate_all(host->chan_tx);
-		host->data->error = sh_mmcif_error_manage(host);
+		data->error = sh_mmcif_error_manage(host);
 	} else if (!time) {
-		host->data->error = -ETIMEDOUT;
+		data->error = -ETIMEDOUT;
 	} else if (time < 0) {
-		host->data->error = time;
+		data->error = time;
 	}
 	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
 			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
 	host->dma_active = false;
 
-	if (host->data->error)
-		host->data->bytes_xfered = 0;
+	if (data->error)
+		data->bytes_xfered = 0;
 
 	return false;
 }
@@ -1046,6 +1053,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
 	struct mmc_request *mrq = host->mrq;
+	struct mmc_data *data = mrq->data;
 
 	cancel_delayed_work_sync(&host->timeout_work);
 
@@ -1093,20 +1101,18 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
 		if (host->sd_error)
-			mrq->data->error = sh_mmcif_error_manage(host);
+			data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
 	}
 
 	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
-		host->data = NULL;
+		if (!mrq->cmd->error && data && !data->error)
+			data->bytes_xfered =
+				data->blocks * data->blksz;
 
-		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-
-		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
 			sh_mmcif_stop_cmd(host, mrq);
 			if (!mrq->stop->error)
 				return IRQ_HANDLED;
@@ -1115,6 +1121,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
 	host->state = STATE_IDLE;
+	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 
 	return IRQ_HANDLED;
@@ -1210,7 +1217,7 @@ static void mmcif_timeout_work(struct work_struct *work)
 	case MMCIF_WAIT_FOR_WRITE:
 	case MMCIF_WAIT_FOR_READ_END:
 	case MMCIF_WAIT_FOR_WRITE_END:
-		host->data->error = sh_mmcif_error_manage(host);
+		mrq->data->error = sh_mmcif_error_manage(host);
 		break;
 	default:
 		BUG();
@@ -1218,7 +1225,6 @@ static void mmcif_timeout_work(struct work_struct *work)
 
 	host->state = STATE_IDLE;
 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
-	host->data = NULL;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 }
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v2] mmc: sh_mmcif: process requests asynchronously
  2011-12-14 18:31   ` Guennadi Liakhovetski
@ 2011-12-25  2:20     ` Chris Ball
  -1 siblings, 0 replies; 30+ messages in thread
From: Chris Ball @ 2011-12-25  2:20 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi Guennadi,

On Wed, Dec 14 2011, Guennadi Liakhovetski wrote:
> This patch converts the sh_mmcif MMC host driver to process requests
> asynchronously instead of waiting in its .request() method for completion.
> This is achieved by using threaded IRQs.
>
> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

This patch (3/4) no longer applies to mmc-next -- mind resending?

Thanks,

- Chris.
-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v2] mmc: sh_mmcif: process requests asynchronously
@ 2011-12-25  2:20     ` Chris Ball
  0 siblings, 0 replies; 30+ messages in thread
From: Chris Ball @ 2011-12-25  2:20 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi Guennadi,

On Wed, Dec 14 2011, Guennadi Liakhovetski wrote:
> This patch converts the sh_mmcif MMC host driver to process requests
> asynchronously instead of waiting in its .request() method for completion.
> This is achieved by using threaded IRQs.
>
> Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

This patch (3/4) no longer applies to mmc-next -- mind resending?

Thanks,

- Chris.
-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-25  2:20     ` Chris Ball
@ 2011-12-25 20:07       ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-25 20:07 UTC (permalink / raw)
  To: Chris Ball; +Cc: linux-mmc, linux-sh, Magnus Damm

This patch converts the sh_mmcif MMC host driver to process requests
asynchronously instead of waiting in its .request() method for completion.
This is achieved by using threaded IRQs.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---

v3: updated on top of current mmc-next

Hi Chris

On Sat, 24 Dec 2011, Chris Ball wrote:

> Hi Guennadi,
> 
> On Wed, Dec 14 2011, Guennadi Liakhovetski wrote:
> > This patch converts the sh_mmcif MMC host driver to process requests
> > asynchronously instead of waiting in its .request() method for completion.
> > This is achieved by using threaded IRQs.
> >
> > Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> 
> This patch (3/4) no longer applies to mmc-next -- mind resending?

This one should be ok.
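
As an aside, the whole conversion hangs off the split handler pair that
request_threaded_irq() installs: the hard handler only latches the error
state and defers, while the handler thread does the per-stage processing and
may sleep. A minimal sketch of that pattern, with hypothetical foo_* names
rather than the driver's actual handlers:

	/* hard half: runs in interrupt context, must not sleep */
	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;
		u32 state = readl(host->base + FOO_INT);

		writel(~state, host->base + FOO_INT);	/* ack the sources */
		if (state & FOO_INT_ERR)
			host->error = true;

		/* hand over to the thread to advance the request */
		return IRQ_WAKE_THREAD;
	}

	/* threaded half: may sleep, finishes or re-arms the current stage */
	static irqreturn_t foo_irq_thread(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;

		/* ... stage handling, mmc_request_done() when finished ... */
		return IRQ_HANDLED;
	}

	ret = request_threaded_irq(irq, foo_hardirq, foo_irq_thread,
				   0, "foo-mmc", host);

In the patch below this corresponds to sh_mmcif_intr() returning
IRQ_WAKE_THREAD and sh_mmcif_irqt() completing or continuing the request.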

 drivers/mmc/host/sh_mmcif.c |  588 ++++++++++++++++++++++++++++++-------------
 1 files changed, 416 insertions(+), 172 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6da03c5..df76ac1 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,32 @@
  *
  */
 
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
@@ -168,9 +194,22 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct sh_dmae_slave dma_slave_tx;
 	struct sh_dmae_slave dma_slave_rx;
@@ -178,11 +217,17 @@ struct sh_mmcif_host {
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
 	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -468,125 +513,183 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
+{
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
 
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 					struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
 
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-						struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -683,18 +786,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
-				struct mmc_request *mrq, u32 opc)
+			       struct mmc_request *mrq, u32 opc)
 {
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_read(host, mrq);
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_write(host, mrq);
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		return sh_mmcif_single_write(host, mrq);
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		return sh_mmcif_single_read(host, mrq);
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		return -EINVAL;
@@ -705,9 +812,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
-	long time;
-	int ret = 0;
-	u32 mask, opc = cmd->opcode;
+	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
 	/* response busy check */
@@ -738,62 +844,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-					cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-									  host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->stop;
-	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -805,14 +863,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -851,23 +903,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -942,6 +982,157 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		dev_err(host->mmc->parent,
+			"Error IRQ while waiting for DMA completion!\n");
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true, if processing continues, and false, if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -993,14 +1184,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (!host->dma_active)
+			return IRQ_WAKE_THREAD;
+		else if (host->sd_error)
+			mmcif_dma_complete(host);
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
@@ -1054,7 +1289,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1091,18 +1325,20 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
 	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1129,11 +1365,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 
-- 
1.7.2.5


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
@ 2011-12-25 20:07       ` Guennadi Liakhovetski
  0 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-25 20:07 UTC (permalink / raw)
  To: Chris Ball; +Cc: linux-mmc, linux-sh, Magnus Damm

This patch converts the sh_mmcif MMC host driver to process requests
asynchronously instead of waiting in its .request() method for completion.
This is achieved by using threaded IRQs.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
---

v3: updated on top of current mmc-next

Hi Chris

On Sat, 24 Dec 2011, Chris Ball wrote:

> Hi Guennadi,
> 
> On Wed, Dec 14 2011, Guennadi Liakhovetski wrote:
> > This patch converts the sh_mmcif MMC host driver to process requests
> > asynchronously instead of waiting in its .request() method for completion.
> > This is achieved by using threaded IRQs.
> >
> > Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
> 
> This patch (3/4) no longer applies to mmc-next -- mind resending?

This one should be ok.

 drivers/mmc/host/sh_mmcif.c |  588 ++++++++++++++++++++++++++++++-------------
 1 files changed, 416 insertions(+), 172 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6da03c5..df76ac1 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,32 @@
  *
  */
 
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
@@ -168,9 +194,22 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct sh_dmae_slave dma_slave_tx;
 	struct sh_dmae_slave dma_slave_rx;
@@ -178,11 +217,17 @@ struct sh_mmcif_host {
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
 	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -468,125 +513,183 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
+{
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
 
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 					struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
 
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-						struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -683,18 +786,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
-				struct mmc_request *mrq, u32 opc)
+			       struct mmc_request *mrq, u32 opc)
 {
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_read(host, mrq);
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_write(host, mrq);
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		return sh_mmcif_single_write(host, mrq);
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		return sh_mmcif_single_read(host, mrq);
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		return -EINVAL;
@@ -705,9 +812,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
-	long time;
-	int ret = 0;
-	u32 mask, opc = cmd->opcode;
+	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
 	/* response busy check */
@@ -738,62 +844,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-					cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-									  host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->stop;
-	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -805,14 +863,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -851,23 +903,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -942,6 +982,157 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd		= sh_mmcif_get_cd,
 };
 
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		dev_err(host->mmc->parent,
+			"Error IRQ while waiting for DMA completion!\n");
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true if processing continues, and false if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait for more data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -993,14 +1184,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (!host->dma_active)
+			return IRQ_WAKE_THREAD;
+		else if (host->sd_error)
+			mmcif_dma_complete(host);
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
@@ -1054,7 +1289,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1091,18 +1325,20 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
 	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1129,11 +1365,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 
-- 
1.7.2.5
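
For readers following the state-machine comment at the top of the patch, a
minimal, driver-agnostic sketch of the same threaded-IRQ plus timeout-work
pattern could look roughly like the code below. All identifiers (my_host,
my_hard_irq, my_threaded_irq, ...) are hypothetical and exist only to
illustrate the control flow; only request_threaded_irq(), the delayed-work
calls and the irqreturn_t values are real kernel interfaces.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

enum my_wait_for {
	MY_WAIT_FOR_REQUEST,	/* idle, no request in flight */
	MY_WAIT_FOR_CMD,	/* command sent, waiting for its IRQ */
	MY_WAIT_FOR_DATA,	/* data stage armed, waiting for its IRQ */
};

struct my_host {
	enum my_wait_for wait_for;
	struct delayed_work timeout_work;
	unsigned long timeout;		/* in jiffies */
	bool error;
	void *mrq;			/* the request being processed */
};

/* Top half: sample the hardware status and wake the bottom half */
static irqreturn_t my_hard_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;

	/* a real driver reads and acknowledges its status registers here */
	host->error = false;

	return IRQ_WAKE_THREAD;
}

/* Bottom half: finish the request, or arm the next stage and the timeout */
static irqreturn_t my_threaded_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;
	bool more = false;

	cancel_delayed_work_sync(&host->timeout_work);

	switch (host->wait_for) {
	case MY_WAIT_FOR_CMD:
	case MY_WAIT_FOR_DATA:
		/* stage-specific register or PIO handling goes here */
		more = !host->error;
		break;
	default:
		break;
	}

	if (more) {
		/* "next top half": kick the hardware, re-arm the timeout */
		host->wait_for = MY_WAIT_FOR_DATA;
		schedule_delayed_work(&host->timeout_work, host->timeout);
	} else {
		/* request done - a real driver calls mmc_request_done() */
		host->wait_for = MY_WAIT_FOR_REQUEST;
		host->mrq = NULL;
	}

	return IRQ_HANDLED;
}

Both handlers would be registered with request_threaded_irq(irq, my_hard_irq,
my_threaded_irq, 0, "my_host", host) and the timeout work initialized with
INIT_DELAYED_WORK(), as the patch does for sh_mmcif_intr() and sh_mmcif_irqt().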


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-25 20:07       ` Guennadi Liakhovetski
@ 2011-12-25 22:43         ` Chris Ball
  -1 siblings, 0 replies; 30+ messages in thread
From: Chris Ball @ 2011-12-25 22:43 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi Guennadi,

On Sun, Dec 25 2011, Guennadi Liakhovetski wrote:
>> This patch (3/4) no longer applies to mmc-next -- mind resending?
>
> This one should be ok.

Seems to be worse, for some reason:

patching file drivers/mmc/host/sh_mmcif.c
Hunk #2 succeeded at 189 (offset -5 lines).
Hunk #3 FAILED at 217.
Hunk #4 succeeded at 503 (offset -4 lines).
Hunk #5 FAILED at 780.
Hunk #6 FAILED at 802.
Hunk #7 FAILED at 835.
Hunk #8 succeeded at 908 (offset 6 lines).
Hunk #9 FAILED at 942.
Hunk #10 succeeded at 1044 with fuzz 2 (offset 11 lines).
Hunk #11 succeeded at 1246 (offset 11 lines).
Hunk #12 succeeded at 1351 (offset 11 lines).
Hunk #13 FAILED at 1376.
Hunk #14 succeeded at 1425 (offset 11 lines).
6 out of 14 hunks FAILED -- saving rejects to file drivers/mmc/host/sh_mmcif.c.rej

- Chris.
-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-25 22:43         ` Chris Ball
@ 2011-12-25 23:00           ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-25 23:00 UTC (permalink / raw)
  To: Chris Ball; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi Chris

On Sun, 25 Dec 2011, Chris Ball wrote:

> Hi Guennadi,
> 
> On Sun, Dec 25 2011, Guennadi Liakhovetski wrote:
> >> This patch (3/4) no longer applies to mmc-next -- mind resending?
> >
> > This one should be ok.
> 
> Seems to be worse, for some reason:
> 
> patching file drivers/mmc/host/sh_mmcif.c
> Hunk #2 succeeded at 189 (offset -5 lines).
> Hunk #3 FAILED at 217.
> Hunk #4 succeeded at 503 (offset -4 lines).
> Hunk #5 FAILED at 780.
> Hunk #6 FAILED at 802.
> Hunk #7 FAILED at 835.
> Hunk #8 succeeded at 908 (offset 6 lines).
> Hunk #9 FAILED at 942.
> Hunk #10 succeeded at 1044 with fuzz 2 (offset 11 lines).
> Hunk #11 succeeded at 1246 (offset 11 lines).
> Hunk #12 succeeded at 1351 (offset 11 lines).
> Hunk #13 FAILED at 1376.
> Hunk #14 succeeded at 1425 (offset 11 lines).
> 6 out of 14 hunks FAILED -- saving rejects to file drivers/mmc/host/sh_mmcif.c.rej

Hmmm, that's very weird - I rebased my patch series on top of your 
mmc-next from a couple of hours ago. Sorry for asking, but you've also 
applied the leading two patches from this series, right? This is what my 
log of this driver looks like now:

6bf5ca1 mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member
8777cc4 mmc: sh_mmcif: process requests asynchronously
0295d69 mmc: sh_mmcif: cosmetic clean up
74dfec8 mmc: sh_mmcif: process error interrupts first
9092a17 mmc: convert drivers/mmc/host/* to use module_platform_driver()

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-25 23:00           ` Guennadi Liakhovetski
@ 2011-12-26  1:48             ` Chris Ball
  -1 siblings, 0 replies; 30+ messages in thread
From: Chris Ball @ 2011-12-26  1:48 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi,

On Sun, Dec 25 2011, Guennadi Liakhovetski wrote:
> Hmmm, that's very weird - I rebased my patch series on top of your 
> mmc-next from a couple of hours ago. Sorry for asking, but you've also 
> applied the leading two patches from this series, right? This is what my 
> log of this driver looks like now:

Oops, that's it, thanks.

Now I have:

13cb975 (HEAD, mmc-next) mmc: sh_mmcif: cosmetic clean up
9e66e1c mmc: sh_mmcif: process error interrupts first
9092a17 mmc: convert drivers/mmc/host/* to use module_platform_driver()
2736566 mmc: sh_mmcif: simplify clock divisor calculation
58f1934 mmc: sh_mmcif: fix clock gating on platforms with a .down_pwr() method
88b4767 mmc: Add module.h to drivers/mmc users assuming implicit presence.
714c4a6 mmc: sh_mmcif: simplify platform data
c9b0cef mmc: sh_mmcif: maximize power saving

But 4/4 ("mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data
member") isn't applying without fuzz (offset 13 lines).  Any ideas/want
to resend it?

Thanks,

- Chris.
-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-26  1:48             ` Chris Ball
@ 2011-12-26  8:44               ` Guennadi Liakhovetski
  -1 siblings, 0 replies; 30+ messages in thread
From: Guennadi Liakhovetski @ 2011-12-26  8:44 UTC (permalink / raw)
  To: Chris Ball; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi Chris

On Sun, 25 Dec 2011, Chris Ball wrote:

> Hi,
> 
> On Sun, Dec 25 2011, Guennadi Liakhovetski wrote:
> > Hmmm, that's very weird - I rebased my patch series on top of your 
> > mmc-next from a couple of hours ago. Sorry for asking, but you've also 
> > applied the leading two patches from this series, right? This is what my 
> > log of this driver looks like now:
> 
> Oops, that's it, thanks.
> 
> Now I have:
> 
> 13cb975 (HEAD, mmc-next) mmc: sh_mmcif: cosmetic clean up
> 9e66e1c mmc: sh_mmcif: process error interrupts first
> 9092a17 mmc: convert drivers/mmc/host/* to use module_platform_driver()
> 2736566 mmc: sh_mmcif: simplify clock divisor calculation
> 58f1934 mmc: sh_mmcif: fix clock gating on platforms with a .down_pwr() method
> 88b4767 mmc: Add module.h to drivers/mmc users assuming implicit presence.
> 714c4a6 mmc: sh_mmcif: simplify platform data
> c9b0cef mmc: sh_mmcif: maximize power saving
> 
> But 4/4 ("mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data
> member") isn't applying without fuzz (offset 13 lines).  Any ideas/want
> to resend it?

I can resend it if you like, but in fact that fuzz is harmless - you can 
go ahead with the patch you have at hand. But, please, do let me know if 
you prefer to have an updated patch.

Thanks
Guennadi
---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 3/4 v3] mmc: sh_mmcif: process requests asynchronously
  2011-12-26  8:44               ` Guennadi Liakhovetski
@ 2011-12-26 17:59                 ` Chris Ball
  -1 siblings, 0 replies; 30+ messages in thread
From: Chris Ball @ 2011-12-26 17:59 UTC (permalink / raw)
  To: Guennadi Liakhovetski; +Cc: linux-mmc, linux-sh, Magnus Damm

Hi,

On Mon, Dec 26 2011, Guennadi Liakhovetski wrote:
> I can resend it if you like, but in fact that fuzz is harmless - you can 
> go ahead with the patch you have at hand. But, please, do let me know if 
> you prefer to have an updated patch.

Okay, pushed to mmc-next for 3.3 -- please just check everything looks
reasonable there.  Thanks,

- Chris.
-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2011-12-26 18:00 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-11-30 15:07 [PATCH] mmc: sh_mmcif: process requests asynchronously Guennadi Liakhovetski
2011-11-30 15:07 ` Guennadi Liakhovetski
2011-12-02 15:24 ` Magnus Damm
2011-12-02 15:24   ` Magnus Damm
2011-12-02 15:54   ` Guennadi Liakhovetski
2011-12-02 15:54     ` Guennadi Liakhovetski
2011-12-14 18:31 ` [PATCH 0/4 v2] mmc_ " Guennadi Liakhovetski
2011-12-14 18:31   ` Guennadi Liakhovetski
2011-12-14 18:31 ` [PATCH 1/4] mmc: sh_mmcif: process error interrupts first Guennadi Liakhovetski
2011-12-14 18:31   ` Guennadi Liakhovetski
2011-12-14 18:31 ` [PATCH 2/4 v2] mmc: sh_mmcif: cosmetic clean up Guennadi Liakhovetski
2011-12-14 18:31   ` Guennadi Liakhovetski
2011-12-14 18:31 ` [PATCH 3/4 v2] mmc: sh_mmcif: process requests asynchronously Guennadi Liakhovetski
2011-12-14 18:31   ` Guennadi Liakhovetski
2011-12-25  2:20   ` Chris Ball
2011-12-25  2:20     ` Chris Ball
2011-12-25 20:07     ` [PATCH 3/4 v3] " Guennadi Liakhovetski
2011-12-25 20:07       ` Guennadi Liakhovetski
2011-12-25 22:43       ` Chris Ball
2011-12-25 22:43         ` Chris Ball
2011-12-25 23:00         ` Guennadi Liakhovetski
2011-12-25 23:00           ` Guennadi Liakhovetski
2011-12-26  1:48           ` Chris Ball
2011-12-26  1:48             ` Chris Ball
2011-12-26  8:44             ` Guennadi Liakhovetski
2011-12-26  8:44               ` Guennadi Liakhovetski
2011-12-26 17:59               ` Chris Ball
2011-12-26 17:59                 ` Chris Ball
2011-12-14 18:31 ` [PATCH 4/4] mmc: sh_mmcif: remove now superfluous sh_mmcif_host::data member Guennadi Liakhovetski
2011-12-14 18:31   ` Guennadi Liakhovetski
