From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
To: Ulf Hansson <ulf.hansson@linaro.org>
Cc: linux-mmc <linux-mmc@vger.kernel.org>, Simon <horms@verge.net.au>,
	Magnus <magnus.damm@gmail.com>,
	Linux-SH <linux-sh@vger.kernel.org>,
	Laurent <laurent.pinchart@ideasonboard.com>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	kobayashi <keita.kobayashi.ym@renesas.com>
Subject: [PATCH 1/5 v6] mmc: sh_mmcif: add sh_mmcif_host_to_dev() macro and use it.
Date: Thu, 14 May 2015 07:21:18 +0000	[thread overview]
Message-ID: <874mnfpqwl.wl%kuninori.morimoto.gx@renesas.com> (raw)
In-Reply-To: <87617vpqy3.wl%kuninori.morimoto.gx@renesas.com>

From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

The sh_mmcif driver currently uses &host->pd->dev directly all over the
place. This is not a big problem, but it hurts readability and can become
a source of future bugs. This patch adds a new sh_mmcif_host_to_dev()
macro and uses it.

Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Tested-by: Keita Kobayashi <keita.kobayashi.ym@renesas.com>
---
v5 -> v6

 - no change

 drivers/mmc/host/sh_mmcif.c | 114 +++++++++++++++++++++++++++-----------------
 1 file changed, 71 insertions(+), 43 deletions(-)
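
As a quick illustration (taken from the hunks below), each call site stops
dereferencing &host->pd->dev inline and instead fetches the device pointer
once through the new macro and reuses it:

	/* before */
	dev_dbg(&host->pd->dev, "Command completed\n");

	/* after */
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");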

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 068b9a6..29835ae 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -262,6 +262,8 @@ static const struct of_device_id mmcif_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, mmcif_of_match);
 
+#define sh_mmcif_host_to_dev(host) (&host->pd->dev)
+
 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
 					unsigned int reg, u32 val)
 {
@@ -278,11 +280,12 @@ static void mmcif_dma_complete(void *arg)
 {
 	struct sh_mmcif_host *host = arg;
 	struct mmc_request *mrq = host->mrq;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 
-	dev_dbg(&host->pd->dev, "Command completed\n");
+	dev_dbg(dev, "Command completed\n");
 
 	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
-		 dev_name(&host->pd->dev)))
+		 dev_name(dev)))
 		return;
 
 	complete(&host->dma_complete);
@@ -294,6 +297,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
@@ -312,7 +316,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
 		dma_async_issue_pending(chan);
 	}
-	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
 		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
@@ -328,12 +332,12 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
 			host->chan_tx = NULL;
 			dma_release_channel(chan);
 		}
-		dev_warn(&host->pd->dev,
+		dev_warn(dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
 	}
 
-	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
 		desc, cookie, data->sg_len);
 }
 
@@ -343,6 +347,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 	struct scatterlist *sg = data->sg;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	dma_cookie_t cookie = -EINVAL;
 	int ret;
 
@@ -361,7 +366,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
 		dma_async_issue_pending(chan);
 	}
-	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
 		__func__, data->sg_len, ret, cookie);
 
 	if (!desc) {
@@ -377,12 +382,12 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 			host->chan_rx = NULL;
 			dma_release_channel(chan);
 		}
-		dev_warn(&host->pd->dev,
+		dev_warn(dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
 	}
 
-	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
+	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
 		desc, cookie);
 }
 
@@ -395,6 +400,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
 	struct dma_chan *chan;
 	void *slave_data = NULL;
 	struct resource *res;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	dma_cap_mask_t mask;
 	int ret;
 
@@ -407,10 +413,10 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
 			(void *)pdata->slave_id_rx;
 
 	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
-				slave_data, &host->pd->dev,
+				slave_data, dev,
 				direction = DMA_MEM_TO_DEV ? "tx" : "rx");
 
-	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
+	dev_dbg(dev, "%s: %s: got channel %p\n", __func__,
 		direction = DMA_MEM_TO_DEV ? "TX" : "RX", chan);
 
 	if (!chan)
@@ -440,12 +446,13 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
 				 struct sh_mmcif_plat_data *pdata)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	host->dma_active = false;
 
 	if (pdata) {
 		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
 			return;
-	} else if (!host->pd->dev.of_node) {
+	} else if (!dev->of_node) {
 		return;
 	}
 
@@ -481,7 +488,8 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
 
 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
 {
-	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+	struct device *dev = sh_mmcif_host_to_dev(host);
+	struct sh_mmcif_plat_data *p = dev->platform_data;
 	bool sup_pclk = p ? p->sup_pclk : false;
 	unsigned int current_clk = clk_get_rate(host->clk);
 
@@ -520,6 +528,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
 
 static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	u32 state1, state2;
 	int ret, timeout;
 
@@ -527,8 +536,8 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 
 	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
 	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
-	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
-	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
+	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
+	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);
 
 	if (state1 & STS1_CMDSEQ) {
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
@@ -540,25 +549,25 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 			mdelay(1);
 		}
 		if (!timeout) {
-			dev_err(&host->pd->dev,
+			dev_err(dev,
 				"Forced end of command sequence timeout err\n");
 			return -EIO;
 		}
 		sh_mmcif_sync_reset(host);
-		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
+		dev_dbg(dev, "Forced end of command sequence\n");
 		return -EIO;
 	}
 
 	if (state2 & STS2_CRC_ERR) {
-		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
+		dev_err(dev, " CRC error: state %u, wait %u\n",
 			host->state, host->wait_for);
 		ret = -EIO;
 	} else if (state2 & STS2_TIMEOUT_ERR) {
-		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
+		dev_err(dev, " Timeout: state %u, wait %u\n",
 			host->state, host->wait_for);
 		ret = -ETIMEDOUT;
 	} else {
-		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
+		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
 			host->state, host->wait_for);
 		ret = -EIO;
 	}
@@ -599,13 +608,14 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
 
 static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	struct mmc_data *data = host->mrq->data;
 	u32 *p = sg_virt(data->sg);
 	int i;
 
 	if (host->sd_error) {
 		data->error = sh_mmcif_error_manage(host);
-		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
 		return false;
 	}
 
@@ -640,13 +650,14 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
 
 static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	struct mmc_data *data = host->mrq->data;
 	u32 *p = host->pio_ptr;
 	int i;
 
 	if (host->sd_error) {
 		data->error = sh_mmcif_error_manage(host);
-		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
 		return false;
 	}
 
@@ -677,13 +688,14 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
 
 static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	struct mmc_data *data = host->mrq->data;
 	u32 *p = sg_virt(data->sg);
 	int i;
 
 	if (host->sd_error) {
 		data->error = sh_mmcif_error_manage(host);
-		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
 		return false;
 	}
 
@@ -718,13 +730,14 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
 
 static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	struct mmc_data *data = host->mrq->data;
 	u32 *p = host->pio_ptr;
 	int i;
 
 	if (host->sd_error) {
 		data->error = sh_mmcif_error_manage(host);
-		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
+		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
 		return false;
 	}
 
@@ -762,6 +775,7 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
 static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 			    struct mmc_request *mrq)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	struct mmc_data *data = mrq->data;
 	struct mmc_command *cmd = mrq->cmd;
 	u32 opc = cmd->opcode;
@@ -781,7 +795,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 		tmp |= CMD_SET_RTYP_17B;
 		break;
 	default:
-		dev_err(&host->pd->dev, "Unsupported response type.\n");
+		dev_err(dev, "Unsupported response type.\n");
 		break;
 	}
 	switch (opc) {
@@ -809,7 +823,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 			tmp |= CMD_SET_DATW_8;
 			break;
 		default:
-			dev_err(&host->pd->dev, "Unsupported bus width.\n");
+			dev_err(dev, "Unsupported bus width.\n");
 			break;
 		}
 		switch (host->timing) {
@@ -852,6 +866,8 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq, u32 opc)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
+
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
 		sh_mmcif_multi_read(host, mrq);
@@ -867,7 +883,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 		sh_mmcif_single_read(host, mrq);
 		return 0;
 	default:
-		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
+		dev_err(dev, "Unsupported CMD%d\n", opc);
 		return -EINVAL;
 	}
 }
@@ -924,6 +940,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
+	struct device *dev = sh_mmcif_host_to_dev(host);
+
 	switch (mrq->cmd->opcode) {
 	case MMC_READ_MULTIPLE_BLOCK:
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -932,7 +950,7 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
 		break;
 	default:
-		dev_err(&host->pd->dev, "unsupported stop cmd\n");
+		dev_err(dev, "unsupported stop cmd\n");
 		mrq->stop->error = sh_mmcif_error_manage(host);
 		return;
 	}
@@ -943,11 +961,13 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->lock, flags);
 	if (host->state != STATE_IDLE) {
-		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+		dev_dbg(dev, "%s() rejected, state %u\n",
+			__func__, host->state);
 		spin_unlock_irqrestore(&host->lock, flags);
 		mrq->cmd->error = -EAGAIN;
 		mmc_request_done(mmc, mrq);
@@ -999,11 +1019,13 @@ static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	unsigned long flags;
 
 	spin_lock_irqsave(&host->lock, flags);
 	if (host->state != STATE_IDLE) {
-		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
+		dev_dbg(dev, "%s() rejected, state %u\n",
+			__func__, host->state);
 		spin_unlock_irqrestore(&host->lock, flags);
 		return;
 	}
@@ -1014,7 +1036,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	if (ios->power_mode == MMC_POWER_UP) {
 		if (!host->card_present) {
 			/* See if we also get DMA */
-			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
+			sh_mmcif_request_dma(host, dev->platform_data);
 			host->card_present = true;
 		}
 		sh_mmcif_set_power(host, ios);
@@ -1028,7 +1050,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			}
 		}
 		if (host->power) {
-			pm_runtime_put_sync(&host->pd->dev);
+			pm_runtime_put_sync(dev);
 			clk_disable_unprepare(host->clk);
 			host->power = false;
 			if (ios->power_mode == MMC_POWER_OFF)
@@ -1042,7 +1064,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		if (!host->power) {
 			clk_prepare_enable(host->clk);
 
-			pm_runtime_get_sync(&host->pd->dev);
+			pm_runtime_get_sync(dev);
 			host->power = true;
 			sh_mmcif_sync_reset(host);
 		}
@@ -1057,7 +1079,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 static int sh_mmcif_get_cd(struct mmc_host *mmc)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
-	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+	struct device *dev = sh_mmcif_host_to_dev(host);
+	struct sh_mmcif_plat_data *p = dev->platform_data;
 	int ret = mmc_gpio_get_cd(mmc);
 
 	if (ret >= 0)
@@ -1079,6 +1102,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 {
 	struct mmc_command *cmd = host->mrq->cmd;
 	struct mmc_data *data = host->mrq->data;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	long time;
 
 	if (host->sd_error) {
@@ -1092,7 +1116,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 			cmd->error = sh_mmcif_error_manage(host);
 			break;
 		}
-		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
+		dev_dbg(dev, "CMD%d error %d\n",
 			cmd->opcode, cmd->error);
 		host->sd_error = false;
 		return false;
@@ -1172,6 +1196,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
 	struct mmc_request *mrq;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	bool wait = false;
 	unsigned long flags;
 	int wait_work;
@@ -1186,7 +1211,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 
 	mrq = host->mrq;
 	if (!mrq) {
-		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
 			host->state, host->wait_for);
 		mutex_unlock(&host->thread_lock);
 		return IRQ_HANDLED;
@@ -1224,7 +1249,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	case MMCIF_WAIT_FOR_STOP:
 		if (host->sd_error) {
 			mrq->stop->error = sh_mmcif_error_manage(host);
-			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
+			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
 			break;
 		}
 		sh_mmcif_get_cmd12response(host, mrq->stop);
@@ -1234,7 +1259,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	case MMCIF_WAIT_FOR_WRITE_END:
 		if (host->sd_error) {
 			mrq->data->error = sh_mmcif_error_manage(host);
-			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
+			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
 		}
 		break;
 	default:
@@ -1277,6 +1302,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	u32 state, mask;
 
 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
@@ -1288,22 +1314,22 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
 
 	if (state & ~MASK_CLEAN)
-		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
+		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
 			state);
 
 	if (state & INT_ERR_STS || state & ~INT_ALL) {
 		host->sd_error = true;
-		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
+		dev_dbg(dev, "int err state = 0x%08x\n", state);
 	}
 	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
 		if (!host->mrq)
-			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
+			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
 		if (!host->dma_active)
 			return IRQ_WAKE_THREAD;
 		else if (host->sd_error)
 			mmcif_dma_complete(host);
 	} else {
-		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
 	}
 
 	return IRQ_HANDLED;
@@ -1314,6 +1340,7 @@ static void mmcif_timeout_work(struct work_struct *work)
 	struct delayed_work *d = container_of(work, struct delayed_work, work);
 	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
 	struct mmc_request *mrq = host->mrq;
+	struct device *dev = sh_mmcif_host_to_dev(host);
 	unsigned long flags;
 
 	if (host->dying)
@@ -1326,7 +1353,7 @@ static void mmcif_timeout_work(struct work_struct *work)
 		return;
 	}
 
-	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
+	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
 		host->wait_for, mrq->cmd->opcode);
 
 	host->state = STATE_TIMEOUT;
@@ -1363,7 +1390,8 @@ static void mmcif_timeout_work(struct work_struct *work)
 
 static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
 {
-	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+	struct device *dev = sh_mmcif_host_to_dev(host);
+	struct sh_mmcif_plat_data *pd = dev->platform_data;
 	struct mmc_host *mmc = host->mmc;
 
 	mmc_regulator_get_supply(mmc);
-- 
1.9.1


Thread overview: 95+ messages
     [not found] <873840a4ch.wl%kuninori.morimoto.gx@renesas.com>
2015-04-21  3:49 ` mmc: sh_mmcif: add PLL support Kuninori Morimoto
2015-04-21  3:50   ` [PATCH 1/3] mmc: sh_mmcif: move mmcif_of_match to upside Kuninori Morimoto
2015-04-21  3:50   ` [PATCH 2/3] mmc: sh_mmcif: cleanup to use dev instead of &pdev->dev Kuninori Morimoto
2015-04-21  3:51   ` [PATCH 3/3] mmc: sh_mmcif: calculate best clock with PLL Kuninori Morimoto
2015-04-21  7:43     ` Kuninori Morimoto
2015-04-21  7:51       ` Laurent Pinchart
2015-04-21  7:58         ` Kuninori Morimoto
2015-04-21  7:53 ` [PATCH 0/3 v2] mmc: sh_mmcif: add PLL support Kuninori Morimoto
2015-04-21  7:54   ` [PATCH 1/3 v2] mmc: sh_mmcif: move mmcif_of_match to upside Kuninori Morimoto
2015-04-21  7:54   ` [PATCH 2/3 v2] mmc: sh_mmcif: cleanup to use dev instead of &pdev->dev Kuninori Morimoto
2015-04-21  7:55   ` [PATCH 3/3 v2] mmc: sh_mmcif: calculate best clock with parent clock Kuninori Morimoto
2015-04-21  8:23   ` [PATCH 0/3 v2] mmc: sh_mmcif: add PLL support Kuninori Morimoto
2015-04-21  8:26 ` [PATCH 0/3 v3] " Kuninori Morimoto
2015-04-21  8:26   ` [PATCH 1/3 v3] mmc: sh_mmcif: move mmcif_of_match to upside Kuninori Morimoto
2015-04-21 10:07     ` Geert Uytterhoeven
2015-04-21 10:07       ` Geert Uytterhoeven
2015-04-21  8:27   ` [PATCH 2/3 v3] mmc: sh_mmcif: cleanup to use dev instead of &pdev->dev Kuninori Morimoto
2015-04-21 10:07     ` Geert Uytterhoeven
2015-04-21 10:07       ` Geert Uytterhoeven
2015-04-21  8:31   ` [PATCH 3/3 v3] mmc: sh_mmcif: calculate best clock with parent clock Kuninori Morimoto
2015-04-21  8:31     ` Kuninori Morimoto
2015-04-21 10:31     ` Geert Uytterhoeven
2015-04-21 10:31       ` Geert Uytterhoeven
2015-04-21 13:07       ` Laurent Pinchart
2015-04-21 13:07         ` Laurent Pinchart
2015-04-22  1:05         ` Kuninori Morimoto
2015-05-04  1:04           ` Laurent Pinchart
2015-05-04  1:04             ` Laurent Pinchart
2015-05-11  2:15             ` Kuninori Morimoto
2015-05-11  2:15               ` Kuninori Morimoto
2015-04-22  1:04       ` Kuninori Morimoto
2015-04-22  7:49         ` Geert Uytterhoeven
2015-04-22  7:49           ` Geert Uytterhoeven
2015-04-22  8:18           ` Ulf Hansson
2015-04-22  8:18             ` Ulf Hansson
2015-04-22  8:22             ` Geert Uytterhoeven
2015-04-22  8:22               ` Geert Uytterhoeven
2015-04-22  9:16               ` Kuninori Morimoto
2015-04-23  8:11 ` [PATCH 0/7 v4] mmc: sh_mmcif: add parent clk support Kuninori Morimoto
2015-04-23  8:13   ` [PATCH 1/7 v4] mmc: sh_mmcif: move mmcif_of_match to upside Kuninori Morimoto
2015-04-23  8:14   ` [PATCH 2/7 v4] mmc: sh_mmcif: cleanup to use dev instead of &pdev->dev Kuninori Morimoto
2015-04-23  8:15   ` [PATCH 3/7 v4] mmc: sh_mmcif: remove unnecessary int clk from struct sh_mmcif_host Kuninori Morimoto
2015-04-23  8:15     ` Kuninori Morimoto
2015-04-23 10:01     ` Geert Uytterhoeven
2015-04-23 10:01       ` Geert Uytterhoeven
2015-04-23  8:16   ` [PATCH 4/7 v4] mmc: sh_mmcif: separate sh_mmcif_clk_update() into setup and prepare Kuninori Morimoto
2015-04-23 10:00     ` Geert Uytterhoeven
2015-04-23 10:00       ` Geert Uytterhoeven
2015-04-23  8:17   ` [PATCH 5/7 v4] mmc: sh_mmcif: calculate best clock with parent clock Kuninori Morimoto
2015-04-23  8:17     ` Kuninori Morimoto
2015-05-12 10:22     ` Laurent Pinchart
2015-05-12 10:22       ` Laurent Pinchart
2015-05-13  0:08       ` Kuninori Morimoto
2015-04-23  8:18   ` [PATCH 6/7 v4] ARM: shmobile: r8a7790: add MMCIF parent clock range Kuninori Morimoto
2015-05-07  5:26     ` Simon Horman
2015-05-07  5:26       ` Simon Horman
2015-05-11  2:53       ` Kuninori Morimoto
2015-05-11  5:39         ` Simon Horman
2015-05-11  5:39           ` Simon Horman
2015-04-23  8:18   ` [PATCH 7/7 v4] ARM: shmobile: r8a7791: " Kuninori Morimoto
2015-04-23 10:07   ` [PATCH 0/7 v4] mmc: sh_mmcif: add parent clk support Laurent Pinchart
2015-04-23 10:07     ` Laurent Pinchart
2015-05-05  8:33     ` Ulf Hansson
2015-05-05  8:33       ` Ulf Hansson
2015-05-13  2:16 ` [PATCH 0/3 v5] " Kuninori Morimoto
2015-05-13  2:17   ` [PATCH 1/3 v5] mmc: sh_mmcif: add sh_mmcif_host_to_dev() macro and use it Kuninori Morimoto
2015-05-13  2:17     ` Kuninori Morimoto
2015-05-13  2:18   ` [PATCH 2/3 v5] mmc: sh_mmcif: use sh_mmcif_xxx prefix for all functions Kuninori Morimoto
2015-05-13  2:18   ` [PATCH 3/3 v5] mmc: sh_mmcif: calculate best clock with parent clock Kuninori Morimoto
2015-05-13  2:18     ` Kuninori Morimoto
2015-05-13  7:55     ` Geert Uytterhoeven
2015-05-13  7:55       ` Geert Uytterhoeven
2015-05-13  8:37       ` Kuninori Morimoto
2015-05-13  8:43         ` Geert Uytterhoeven
2015-05-13  8:43           ` Geert Uytterhoeven
2015-05-13  9:27           ` Kuninori Morimoto
2015-05-14  7:20 ` [PATCH 0/3 v6] mmc: sh_mmcif: add parent clk support Kuninori Morimoto
2015-05-14  7:21   ` Kuninori Morimoto [this message]
2015-05-14  7:21     ` [PATCH 1/5 v6] mmc: sh_mmcif: add sh_mmcif_host_to_dev() macro and use it Kuninori Morimoto
2015-05-14  7:21   ` [PATCH 2/5 v6] mmc: sh_mmcif: use sh_mmcif_xxx prefix for all functions Kuninori Morimoto
2015-05-14  7:22   ` [PATCH 3/5 v6] mmc: sh_mmcif: calculate best clock with parent clock Kuninori Morimoto
2015-05-14  7:22     ` Kuninori Morimoto
2015-05-14  7:23   ` [PATCH 4/5 v6] ARM: shmobile: r8a7790: add MMCIF max-frequency Kuninori Morimoto
2015-05-14  7:23   ` [PATCH 5/5 v6] ARM: shmobile: r8a7791: " Kuninori Morimoto
2015-05-22 14:01   ` [PATCH 0/3 v6] mmc: sh_mmcif: add parent clk support Ulf Hansson
2015-05-22 14:01     ` Ulf Hansson
2015-05-25  0:24     ` Kuninori Morimoto
2015-05-25  0:50       ` Simon Horman
2015-05-25  0:50         ` Simon Horman
2015-05-25  0:26     ` Simon Horman
2015-05-25  0:26       ` Simon Horman
2015-05-25  0:38       ` Kuninori Morimoto
2015-05-25  8:44     ` Ulf Hansson
2015-05-25  8:44       ` Ulf Hansson
2015-05-25  9:38       ` Kuninori Morimoto
