linux-spi.vger.kernel.org archive mirror
* [PATCH] SPI: SSP SPI Controller driver
@ 2012-11-21  2:16 chao bi
  2012-11-21 12:08 ` Shubhrajyoti Datta
                   ` (3 more replies)
  0 siblings, 4 replies; 26+ messages in thread
From: chao bi @ 2012-11-21  2:16 UTC (permalink / raw)
  To: grant.likely-s3s/WqlpOiPyB63q8FvJNQ
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w


This patch implements the SSP SPI controller driver, which has been applied and
validated on the Intel Moorestown & Medfield platforms. The patch originates from
Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
and was further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Chen Jun
<jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> based on their integration & validation on the Medfield platform.
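
For reference, a protocol/board driver would typically attach a device to this
controller through spi_board_info, passing the chip tuning parameters defined in
spi-intel-mid-ssp.h via controller_data. A minimal illustrative sketch follows
(the modalias, bus number, chip select and speed are assumptions, not part of
this patch):

	#include <linux/spi/spi.h>
	#include <linux/spi/spi-intel-mid-ssp.h>

	/* Illustrative values only */
	static struct intel_mid_ssp_spi_chip example_chip = {
		.burst_size	= IMSS_FIFO_BURST_8,
		.timeout	= DFLT_TIMEOUT_VAL,
		.dma_enabled	= 1,
	};

	static struct spi_board_info example_board_info[] __initdata = {
		{
			.modalias	 = "example-spi-dev",
			.max_speed_hz	 = 25000000,
			.bus_num	 = 0,
			.chip_select	 = 0,
			.mode		 = SPI_MODE_0,
			.controller_data = &example_chip,
		},
	};

	/* Registered from board init code:
	 * spi_register_board_info(example_board_info,
	 *			   ARRAY_SIZE(example_board_info));
	 */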

Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/spi/Kconfig                   |    9 +
 drivers/spi/Makefile                  |    1 +
 drivers/spi/spi-intel-mid-ssp.c       | 1407 +++++++++++++++++++++++++++++++++
 include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++
 4 files changed, 1743 insertions(+), 0 deletions(-)
 create mode 100644 drivers/spi/spi-intel-mid-ssp.c
 create mode 100644 include/linux/spi/spi-intel-mid-ssp.h

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae35..8b4461b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -179,6 +179,15 @@ config SPI_IMX
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel MID platforms"
+	depends on SPI_MASTER && INTEL_MID_DMAC
+	help
+	  This is the unified SSP SPI master controller driver for
+	  Intel MID platforms (Moorestown & Medfield) in master
+	  clock mode. It supports the Bulverde SSP core.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47..83f06d0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_ESPI)		+= spi-fsl-espi.o
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= spi-intel-mid-ssp.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
diff --git a/drivers/spi/spi-intel-mid-ssp.c b/drivers/spi/spi-intel-mid-ssp.c
new file mode 100644
index 0000000..8fca48f
--- /dev/null
+++ b/drivers/spi/spi-intel-mid-ssp.c
@@ -0,0 +1,1407 @@
+/*
+ * spi-intel-mid-ssp.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/module.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-intel-mid-ssp.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pci_ids[];
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+	int tlen1 = (len < sz ? len : sz);
+	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+	unsigned char *p;
+	static char msg[MAX_SPI_TRANSFER_SIZE];
+	char *m = msg;
+
+	memset(msg, '\0', sizeof(msg));
+	p = buf;
+	/*
+	 * Build the dump through an offset pointer: passing msg as both the
+	 * destination and a "%s" source of sprintf() is undefined behaviour.
+	 */
+	while (p < buf + tlen1)
+		m += sprintf(m, "%02x", (unsigned int)*p++);
+
+	if (tlen2 > 0) {
+		m += sprintf(m, " .....");
+		p = (buf + len) - tlen2;
+		while (p < buf + len)
+			m += sprintf(m, "%02x", (unsigned int)*p++);
+	}
+
+	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+		   len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	u32 sssr;
+	sssr = read_SSSR(drv_context->ioaddr);
+	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+		return 0;
+	else
+		return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u32 i = 0;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (!is_tx_fifo_empty(drv_context)) {
+		dev_err(&drv_context->pdev->dev,
+				"TX FIFO not empty. Reset of SPI IF");
+		disable_interface(drv_context);
+		return;
+	}
+
+	dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+	while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
+		read_SSDR(reg);
+		i++;
+	}
+	WARN(i > 0, "%d words flush occurred\n", i);
+
+	return;
+}
+
+static int null_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(0, reg);
+	drv_context->tx += n_bytes;
+
+	return 1;
+}
+
+static int null_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		read_SSDR(reg);
+		drv_context->rx += n_bytes;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u8_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u8 *)(drv_context->tx), reg);
+	++drv_context->tx;
+
+	return 1;
+}
+
+static int u8_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u8 *)(drv_context->rx) = read_SSDR(reg);
+		++drv_context->rx;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u16_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u16 *)(drv_context->tx), reg);
+	drv_context->tx += 2;
+
+	return 1;
+}
+
+static int u16_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u16 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 2;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u32_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u32 *)(drv_context->tx), reg);
+	drv_context->tx += 4;
+
+	return 1;
+}
+
+static int u32_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u32 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 4;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct ssp_driver_context *drv_context =
+		(struct ssp_driver_context *)param;
+	bool ret = false;
+
+	if (!drv_context->dmac1)
+		return ret;
+
+	if (chan->device->dev == &drv_context->dmac1->dev)
+		ret = true;
+
+	return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_context:	Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (!drv_context->dma_mapped)
+		return;
+	dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
+		PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
+		PCI_DMA_TODEVICE);
+	drv_context->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:	Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+	struct callback_param *cb_param = (struct callback_param *)arg;
+	struct ssp_driver_context *drv_context = cb_param->drv_context;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (cb_param->direction == TX_DIRECTION)
+		drv_context->txdma_done = 1;
+	else
+		drv_context->rxdma_done = 1;
+
+	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+		cb_param->direction, drv_context->rxdma_done,
+		drv_context->txdma_done);
+
+	if (drv_context->txdma_done && drv_context->rxdma_done) {
+		/* Clear Status Register */
+		write_SSSR(drv_context->clear_sr, reg);
+		dev_dbg(dev, "DMA done\n");
+		/* Disable Triggers to DMA or to CPU*/
+		disable_triggers(drv_context);
+		unmap_dma_buffers(drv_context);
+
+		queue_work(drv_context->dma_wq, &drv_context->complete_work);
+	}
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	struct dma_slave_config *ds;
+	dma_cap_mask_t mask;
+	struct device *dev = &drv_context->pdev->dev;
+	unsigned int device_id;
+
+	/* Configure RX channel parameters */
+	rxs = &drv_context->dmas_rx;
+	ds = &rxs->dma_slave;
+
+	ds->direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->src_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Configure TX channel parameters */
+	txs = &drv_context->dmas_tx;
+	ds = &txs->dma_slave;
+
+	ds->direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->dst_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Nothing more to do if already initialized */
+	if (drv_context->dma_initialized)
+		return;
+
+	/* Use DMAC1 */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
+		device_id = PCI_MRST_DMAC1_ID;
+	else
+		device_id = PCI_MDFL_DMAC1_ID;
+
+	drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+							device_id, NULL);
+
+	if (!drv_context->dmac1) {
+		dev_err(dev, "Can't find DMAC1");
+		return;
+	}
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+		drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+				2 * MAX_SPI_TRANSFER_SIZE);
+		if (drv_context->virt_addr_sram_rx)
+			drv_context->virt_addr_sram_tx =
+				drv_context->virt_addr_sram_rx +
+				MAX_SPI_TRANSFER_SIZE;
+		else
+			dev_err(dev, "Virt_addr_sram_rx is null\n");
+	}
+
+	/* 1. Allocate rx channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	drv_context->rxchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+	if (!drv_context->rxchan)
+		goto err_exit;
+
+	drv_context->rxchan->private = rxs;
+
+	/* 2. Allocate tx channel */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	drv_context->txchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+
+	if (!drv_context->txchan)
+		goto free_rxchan;
+	else
+		drv_context->txchan->private = txs;
+
+	/* set the dma done bit to 1 */
+	drv_context->txdma_done = 1;
+	drv_context->rxdma_done = 1;
+
+	drv_context->tx_param.drv_context  = drv_context;
+	drv_context->tx_param.direction = TX_DIRECTION;
+	drv_context->rx_param.drv_context  = drv_context;
+	drv_context->rx_param.direction = RX_DIRECTION;
+
+	drv_context->dma_initialized = 1;
+
+	return;
+
+free_rxchan:
+	dma_release_channel(drv_context->rxchan);
+err_exit:
+	dev_err(dev, "Error : DMA Channel Not available\n");
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+	return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_context:	Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
+{
+	dma_release_channel(drv_context->txchan);
+	dma_release_channel(drv_context->rxchan);
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @drv_context:	Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_driver_context *drv_context)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &drv_context->pdev->dev;
+
+	/* get Data Read/Write address */
+	ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
+
+	if (drv_context->tx_dma)
+		drv_context->txdma_done = 0;
+
+	if (drv_context->rx_dma)
+		drv_context->rxdma_done = 0;
+
+	/* 2. prepare the RX dma transfer */
+	txchan = drv_context->txchan;
+	rxchan = drv_context->rxchan;
+
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* Since the DMA is configured to do 32bits access */
+		/* to/from the DDR, the DMA transfer size must be  */
+		/* a multiple of 4 bytes                           */
+		drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
+		drv_context->len_dma_tx = drv_context->len_dma_rx;
+
+		/* In Rx direction, TRAIL Bytes are handled by memcpy */
+		if (drv_context->rx_dma &&
+			(drv_context->len_dma_rx >
+			drv_context->rx_fifo_threshold * drv_context->n_bytes))
+			drv_context->len_dma_rx =
+					TRUNCATE(drv_context->len_dma_rx,
+					drv_context->rx_fifo_threshold *
+					drv_context->n_bytes);
+		else if (!drv_context->rx_dma)
+			dev_err(dev, "ERROR : rx_dma is null\r\n");
+	} else {
+		/* TRAIL Bytes are handled by DMA */
+		if (drv_context->rx_dma) {
+			drv_context->len_dma_rx = drv_context->len;
+			drv_context->len_dma_tx = drv_context->len;
+		} else {
+			dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
+		}
+	}
+
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,				/* DMA Channel */
+		drv_context->rx_dma,			/* DAR */
+		ssdr_addr,				/* SAR */
+		drv_context->len_dma_rx,		/* Data Length */
+		flag);					/* Flag */
+
+	if (rxdesc) {
+		rxdesc->callback = intel_mid_ssp_spi_dma_done;
+		rxdesc->callback_param = &drv_context->rx_param;
+	} else {
+		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%zd)\n",
+			drv_context->len_dma_rx);
+		drv_context->rxdma_done = 1;
+	}
+
+	/* 3. prepare the TX dma transfer */
+	if (drv_context->tx_dma) {
+		txdesc = txchan->device->device_prep_dma_memcpy
+		(txchan,				/* DMA Channel */
+		ssdr_addr,				/* DAR */
+		drv_context->tx_dma,			/* SAR */
+		drv_context->len_dma_tx,		/* Data Length */
+		flag);					/* Flag */
+		if (txdesc) {
+			txdesc->callback = intel_mid_ssp_spi_dma_done;
+			txdesc->callback_param = &drv_context->tx_param;
+		} else {
+			dev_dbg(dev, "txdesc is null! (len_dma_tx:%zd)\n",
+				drv_context->len_dma_tx);
+			drv_context->txdma_done = 1;
+		}
+	} else {
+		dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
+		return;
+	}
+
+	dev_info(dev, "DMA transfer len:%zd len_dma_tx:%zd len_dma_rx:%zd\n",
+		drv_context->len, drv_context->len_dma_tx,
+		drv_context->len_dma_rx);
+
+	if (rxdesc || txdesc) {
+		if (rxdesc) {
+			dev_dbg(dev, "Firing DMA RX channel\n");
+			rxdesc->tx_submit(rxdesc);
+		}
+		if (txdesc) {
+			dev_dbg(dev, "Firing DMA TX channel\n");
+			txdesc->tx_submit(txdesc);
+		}
+	} else {
+		struct callback_param cb_param;
+		cb_param.drv_context = drv_context;
+		dev_dbg(dev, "Bypassing DMA transfer\n");
+		intel_mid_ssp_spi_dma_done(&cb_param);
+	}
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @drv_context:	Pointer to the private driver context
+ */
+static int map_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (unlikely(drv_context->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped\n");
+		return 0;
+	}
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+		/* Copy drv_context->tx into sram_tx */
+		memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
+			drv_context->len);
+#ifdef DUMP_RX
+		dump_trailer(&drv_context->pdev->dev, drv_context->tx,
+			drv_context->len, 16);
+#endif
+		drv_context->rx_dma = SRAM_RX_ADDR;
+		drv_context->tx_dma = SRAM_TX_ADDR;
+	} else {
+		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
+		if (unlikely(drv_context->dma_mapped))
+			return 1;
+
+		drv_context->tx_dma =
+			dma_map_single(dev, drv_context->tx, drv_context->len,
+				PCI_DMA_TODEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
+			dev_err(dev, "ERROR : tx dma mapping failed\n");
+			return 0;
+		}
+
+		drv_context->rx_dma =
+			dma_map_single(dev, drv_context->rx, drv_context->len,
+				PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
+			dma_unmap_single(dev, drv_context->tx_dma,
+				drv_context->len, DMA_TO_DEVICE);
+			dev_err(dev, "ERROR : rx dma mapping failed\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (drv_context->len != drv_context->len_dma_rx) {
+		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+			read_SSSR(reg));
+		drv_context->rx += drv_context->len_dma_rx;
+		drv_context->tx += drv_context->len_dma_tx;
+
+		while ((drv_context->tx != drv_context->tx_end) ||
+			(drv_context->rx != drv_context->rx_end)) {
+			drv_context->read(drv_context);
+			drv_context->write(drv_context);
+		}
+	}
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
+ * @drv_context:	Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
+{
+	u32 length = drv_context->len;
+
+	if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+		&& (drv_context->len > drv_context->rx_fifo_threshold *
+		drv_context->n_bytes))
+		length = TRUNCATE(drv_context->len,
+			drv_context->rx_fifo_threshold * drv_context->n_bytes);
+
+	memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	struct spi_message *msg;
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+		pm_qos_update_request(&drv_context->pm_qos_req,
+					PM_QOS_DEFAULT_VALUE);
+
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+		sram_to_ddr_cpy(drv_context);
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+		drain_trail(drv_context);
+	else
+		/* Stop getting Time Outs */
+		write_SSTO(0, reg);
+
+	drv_context->cur_msg->status = 0;
+	drv_context->cur_msg->actual_length = drv_context->len;
+
+#ifdef DUMP_RX
+	dump_trailer(dev, drv_context->rx, drv_context->len, 16);
+#endif
+
+	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+	msg = drv_context->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+	struct ssp_driver_context *drv_context = container_of(work,
+				struct ssp_driver_context, complete_work);
+
+	int_transfer_complete(drv_context);
+}
+
+static void poll_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	struct spi_message *msg;
+
+	/* Update total bytes transferred with the actual number of bytes read */
+	drv_context->cur_msg->actual_length +=
+		drv_context->len - (drv_context->rx_end - drv_context->rx);
+
+	drv_context->cur_msg->status = 0;
+
+	msg = drv_context->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq:	IRQ number
+ * @dev_id:	Pointer to the private driver context
+ *
+ * The SSP interrupt is not used for transfers, which are handled by
+ * DMA or polling: only under/over runs are caught to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct ssp_driver_context *drv_context = dev_id;
+	void *reg = drv_context->ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	u32 status = read_SSSR(reg);
+
+	/* It should never be our interrupt since the SSP will */
+	/* only trigger an interrupt for under/over run.       */
+	if (likely(!(status & drv_context->mask_sr)))
+		return IRQ_NONE;
+
+	if (status & SSSR_ROR || status & SSSR_TUR) {
+		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
+		WARN_ON(1);
+		if (status & SSSR_ROR)
+			dev_err(dev, "we have Overrun\n");
+		if (status & SSSR_TUR)
+			dev_err(dev, "we have Underrun\n");
+	}
+
+	/* We can fall here when not using DMA mode */
+	if (!drv_context->cur_msg) {
+		disable_interface(drv_context);
+		disable_triggers(drv_context);
+	}
+	/* clear status register */
+	write_SSSR(drv_context->clear_sr, reg);
+	return IRQ_HANDLED;
+}
+
+static void poll_transfer(unsigned long data)
+{
+	struct ssp_driver_context *drv_context =
+		(struct ssp_driver_context *)data;
+
+	if (drv_context->tx)
+		while (drv_context->tx != drv_context->tx_end) {
+			drv_context->write(drv_context);
+			drv_context->read(drv_context);
+		}
+
+	while (!drv_context->read(drv_context))
+		cpu_relax();
+
+	poll_transfer_complete(drv_context);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @drv_context:	Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_driver_context *drv_context)
+{
+	u32 sssr;
+	u32 count = 0;
+	u32 cr0;
+	void *i2c_reg = drv_context->I2C_ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+	struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
+	cr0 = chip->cr0;
+
+	dev_warn(dev, "In %s : Starting bit banging\n", __func__);
+	if (read_SSSR(reg) & SSP_NOT_SYNC)
+		dev_warn(dev, "SSP clock desynchronized.\n");
+	if (!(read_SSCR0(reg) & SSCR0_SSE))
+		dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+	dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+	write_SSPSP(0x02010007, reg);
+
+	write_SSTO(chip->timeout, reg);
+	write_SSCR0(cr0, reg);
+
+	/*
+	*  This routine uses the DFx block to override the SSP inputs
+	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+	*  we have to generate the clock to clear busy.
+	*/
+	write_I2CDATA(0x3, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070034, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CDATA(0x00000099, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	sssr = read_SSSR(reg);
+
+	/* Bit bang the clock until CSS clears */
+	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+		write_I2CDATA(0x2, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CDATA(0x3, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		sssr = read_SSSR(reg);
+		count++;
+	}
+	if (count >= MAX_BITBANGING_LOOP)
+		dev_err(dev,
+			"ERROR in %s : infinite loop on bit banging. Aborting\n",
+			__func__);
+
+	dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+	write_I2CDATA(0x0, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+}
+
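+/*
+ * Derive the SSCR0 serial clock rate field from the requested speed.  The
+ * 100000000 constant is presumably the 100 MHz SSP base clock; the value
+ * returned is the divisor minus one (effective divisor clamped to at least
+ * 4), e.g. a 25 MHz request gives 100000000/25000000 = 4, so 3 is returned.
+ */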
+static unsigned int ssp_get_clk_div(int speed)
+{
+	return max(100000000 / speed, 4) - 1;
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:	Pointer to the spi_device struct
+ * @msg:	Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = drv_context->ioaddr;
+	u32 cr1;
+	struct device *dev = &drv_context->pdev->dev;
+	chip = spi_get_ctldata(msg->spi);
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	drv_context->cur_msg = msg;
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next,
+					struct spi_transfer,
+					transfer_list);
+
+	/* Check transfer length */
+	if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+		(transfer->len == 0))) {
+		dev_warn(dev, "transfer length is zero or greater than %d\n",
+			MAX_SPI_TRANSFER_SIZE);
+		dev_warn(dev, "length = %d\n", transfer->len);
+		msg->status = -EINVAL;
+
+		if (msg->complete)
+			msg->complete(msg->context);
+
+		return 0;
+	}
+
+	/* Flush any remaining data (in case of failed previous transfer) */
+	flush(drv_context);
+
+	drv_context->tx  = (void *)transfer->tx_buf;
+	drv_context->rx  = (void *)transfer->rx_buf;
+	drv_context->len = transfer->len;
+	drv_context->write = chip->write;
+	drv_context->read = chip->read;
+
+	if (likely(chip->dma_enabled)) {
+		drv_context->dma_mapped = map_dma_buffers(drv_context);
+		if (unlikely(!drv_context->dma_mapped))
+			return 0;
+	} else {
+		drv_context->write = drv_context->tx ?
+			chip->write : null_writer;
+		drv_context->read  = drv_context->rx ?
+			chip->read : null_reader;
+	}
+	drv_context->tx_end = drv_context->tx + transfer->len;
+	drv_context->rx_end = drv_context->rx + transfer->len;
+
+	/* Clear status  */
+	write_SSSR(drv_context->clear_sr, reg);
+
+	/* setup the CR1 control register */
+	cr1 = chip->cr1 | drv_context->cr1_sig;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* in case of len smaller than burst size, adjust the RX     */
+		/* threshold. All other cases will use the default threshold */
+		/* value. The RX fifo threshold must be aligned with the DMA */
+		/* RX transfer size, which may be limited to a multiple of 4 */
+		/* bytes due to 32bits DDR access.                           */
+		if  (drv_context->len / drv_context->n_bytes <=
+			drv_context->rx_fifo_threshold) {
+			u32 rx_fifo_threshold;
+
+			rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
+				drv_context->n_bytes;
+			cr1 &= ~(SSCR1_RFT);
+			cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
+					& SSCR1_RFT;
+		} else {
+			write_SSTO(chip->timeout, reg);
+		}
+	}
+
+	dev_dbg(dev,
+		"transfer len:%zd  n_bytes:%d  cr0:%x  cr1:%x",
+		drv_context->len, drv_context->n_bytes, chip->cr0, cr1);
+
+	/* first set CR1 */
+	write_SSCR1(cr1, reg);
+
+	/* Do bitbanging only if SSP not-enabled or not-synchronized */
+	if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+		(!(read_SSCR0(reg) & SSCR0_SSE))) &&
+		(drv_context->quirks & QUIRKS_BIT_BANGING))) {
+			start_bitbanging(drv_context);
+	} else {
+		/* (re)start the SSP */
+		write_SSCR0(chip->cr0, reg);
+	}
+
+	if (likely(chip->dma_enabled)) {
+		if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+			pm_qos_update_request(&drv_context->pm_qos_req,
+				MIN_EXIT_LATENCY);
+		dma_transfer(drv_context);
+	} else {
+		tasklet_schedule(&drv_context->poll_transfer);
+	}
+
+	return 0;
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	u32 tx_fifo_threshold;
+	u32 burst_size;
+	u32 clk_div;
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+	if ((spi->bits_per_word < MIN_BITS_PER_WORD
+		|| spi->bits_per_word > MAX_BITS_PER_WORD))
+		return -EINVAL;
+
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+			"failed setup: can't allocate chip data\n");
+			return -ENOMEM;
+		}
+	}
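+	/*
+	 * SSCR0 data size encoding: DSS holds (bits_per_word - 1) for 4..16
+	 * bit words; for 17..32 bit words EDSS is set and DSS holds
+	 * (bits_per_word - 16 - 1), as produced by SSCR0_DataSize() below.
+	 */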
+	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+		spi->bits_per_word - 16 : spi->bits_per_word)
+			| SSCR0_SSE
+			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+	/* protocol drivers may change the chip settings, so...  */
+	/* if chip_info exists, use it                           */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	chip->cr1 = 0;
+	if (chip_info) {
+		burst_size = chip_info->burst_size;
+		if (burst_size > IMSS_FIFO_BURST_8)
+			burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->timeout = chip_info->timeout;
+
+		if (chip_info->enable_loopback)
+			chip->cr1 |= SSCR1_LBM;
+
+		chip->dma_enabled = chip_info->dma_enabled;
+
+	} else {
+		/* if no chip_info provided by protocol driver, */
+		/* set default values                           */
+		dev_info(&spi->dev, "setting default chip values\n");
+
+		burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->dma_enabled = 1;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			chip->timeout = 0;
+		else
+			chip->timeout = DFLT_TIMEOUT_VAL;
+	}
+	/* Set FIFO thresholds according to burst_size */
+	if (burst_size == IMSS_FIFO_BURST_8)
+		drv_context->rx_fifo_threshold = 8;
+	else if (burst_size == IMSS_FIFO_BURST_4)
+		drv_context->rx_fifo_threshold = 4;
+	else
+		drv_context->rx_fifo_threshold = 1;
+	tx_fifo_threshold = SPI_FIFO_SIZE - drv_context->rx_fifo_threshold;
+	chip->cr1 |= (SSCR1_RxTresh(drv_context->rx_fifo_threshold) &
+		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) &
+		SSCR1_TFT);
+
+	drv_context->dma_mapped = 0;
+
+	/* setting phase and polarity. spi->mode comes from boardinfo */
+	if ((spi->mode & SPI_CPHA) != 0)
+		chip->cr1 |= SSCR1_SPH;
+	if ((spi->mode & SPI_CPOL) != 0)
+		chip->cr1 |= SSCR1_SPO;
+
+	if (drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+		/* set slave mode */
+		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
+
+	dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
+		spi->bits_per_word,
+		spi->mode & 0x3);
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		chip->cr0 |= SSCR0_EDSS;
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		dev_err(&spi->dev, "invalid wordsize\n");
+		return -EINVAL;
+	}
+
+	if ((drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+		chip->speed_hz = spi->max_speed_hz;
+		clk_div = ssp_get_clk_div(chip->speed_hz);
+		chip->cr0 |= clk_div << 8;
+	}
+	chip->bits_per_word = spi->bits_per_word;
+
+	spi_set_ctldata(spi, chip);
+
+	/* setup of drv_context members that will not change across transfers */
+	drv_context->n_bytes = chip->n_bytes;
+
+	if (chip->dma_enabled) {
+		intel_mid_ssp_spi_dma_init(drv_context);
+		drv_context->cr1_sig  = SSCR1_TSRE | SSCR1_RSRE;
+		drv_context->mask_sr  = SSSR_ROR | SSSR_TUR;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			drv_context->cr1_sig  |= SSCR1_TRAIL;
+	} else {
+		drv_context->cr1_sig = SSCR1_RIE | SSCR1_TIE | SSCR1_TINTE;
+		drv_context->mask_sr = SSSR_RFS | SSSR_TFS |
+				 SSSR_ROR | SSSR_TUR | SSSR_TINT;
+	}
+	drv_context->clear_sr = SSSR_TUR  | SSSR_ROR | SSSR_TINT;
+
+	return 0;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+
+	if (drv_context->dma_initialized)
+		intel_mid_ssp_spi_dma_exit(drv_context);
+
+	/* Remove the PM_QOS request */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_remove_request(&drv_context->pm_qos_req);
+
+	kfree(chip);
+	spi_set_ctldata(spi, NULL);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @ent:	Pointer to the pci_device_id struct
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct ssp_driver_context *drv_context = 0;
+	int status;
+	u32 iolen = 0;
+	u8 ssp_cfg;
+	int pos;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+
+	/* Check if the SSP we are probed for has been allocated */
+	/* to operate as SPI. This information is retrieved from */
+	/* the field adid of the Vendor-Specific PCI capability  */
+	/* which is used as a configuration register.            */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability\n");
+		goto err_abort_probe;
+	}
+
+	if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)\n",
+			ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(dev, "found PCI SSP controller(ID: %04xh:%04xh cfg: %02xh)\n",
+		pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate master with space for drv_context */
+	master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_master\n");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	drv_context = spi_master_get_devdata(master);
+	drv_context->master = master;
+
+	drv_context->pdev = pdev;
+	drv_context->quirks = ent->driver_data;
+
+	/* Set platform & configuration quirks */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
+		/* Apply bit banging workaround on MRST */
+		drv_context->quirks |= QUIRKS_BIT_BANGING;
+		/* MRST slave mode workarounds */
+		if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+			drv_context->quirks |=
+				QUIRKS_USE_PM_QOS |
+				QUIRKS_SRAM_ADDITIONAL_CPY;
+	}
+	drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+	if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+		drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
+	master->num_chipselect = 1;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
+	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
+
+	drv_context->dma_initialized = 0;
+
+	/* get basic io resource and map it */
+	drv_context->paddr = pci_resource_start(pdev, 0);
+	iolen = pci_resource_len(pdev, 0);
+
+	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+	if (status)
+		goto err_free_1;
+
+	drv_context->ioaddr =
+		ioremap_nocache(drv_context->paddr, iolen);
+	if (!drv_context->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", drv_context->paddr);
+	dev_dbg(dev, "ioaddr = : %p\n", drv_context->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+	dev_dbg(dev, "quirks = : %08lx\n", drv_context->quirks);
+
+	if (drv_context->quirks & QUIRKS_BIT_BANGING) {
+		/* Bit banging on the clock is done through */
+		/* DFT which is available through I2C.      */
+		/* get base address of I2C_Serbus registers */
+		drv_context->I2C_paddr = 0xff12b000;
+		drv_context->I2C_ioaddr =
+			ioremap_nocache(drv_context->I2C_paddr, 0x10);
+		if (!drv_context->I2C_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_3;
+		}
+	}
+
+	/* Attach to IRQ */
+	drv_context->irq = pdev->irq;
+	status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", drv_context);
+	if (status < 0) {
+		dev_err(&pdev->dev, "can not get IRQ\n");
+		goto err_free_4;
+	}
+
+	if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
+		/* get base address of DMA selector. */
+		syscfg = drv_context->paddr - SYSCFG;
+		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+		if (!syscfg_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_5;
+		}
+		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	}
+
+	tasklet_init(&drv_context->poll_transfer, poll_transfer,
+		(unsigned long)drv_context);
+
+	/* Register with the SPI framework */
+	dev_info(dev, "register with SPI framework (bus spi%d)\n",
+		master->bus_num);
+
+	status = spi_register_master(master);
+
+	if (status != 0) {
+		dev_err(dev, "problem registering spi\n");
+		goto err_free_5;
+	}
+
+	pci_set_drvdata(pdev, drv_context);
+
+	/* Create the PM_QOS request */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_add_request(&drv_context->pm_qos_req,
+		PM_QOS_CPU_DMA_LATENCY,
+		PM_QOS_DEFAULT_VALUE);
+
+	return status;
+
+err_free_5:
+	free_irq(drv_context->irq, drv_context);
+err_free_4:
+	iounmap(drv_context->I2C_ioaddr);
+err_free_3:
+	iounmap(drv_context->ioaddr);
+err_free_2:
+	pci_release_region(pdev, 0);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+
+	if (!drv_context)
+		return;
+
+	/* Release IRQ */
+	free_irq(drv_context->irq, drv_context);
+
+	iounmap(drv_context->ioaddr);
+	if (drv_context->quirks & QUIRKS_BIT_BANGING)
+		iounmap(drv_context->I2C_ioaddr);
+
+	/* disconnect from the SPI framework */
+	spi_unregister_master(drv_context->master);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * intel_mid_ssp_spi_suspend() - Driver suspend procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @state:	pm_message_t
+ */
+static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "suspend\n");
+
+	tasklet_disable(&drv_context->poll_transfer);
+
+	return 0;
+}
+
+/**
+ * intel_mid_ssp_spi_resume() - Driver resume procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "resume\n");
+
+	tasklet_enable(&drv_context->poll_transfer);
+
+	return 0;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#endif /* CONFIG_PM */
+
+
+static const struct pci_device_id pci_ids[] __devinitdata = {
+	/* MRST SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
+	/* MDFL SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP1 */
+	{ PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP3 */
+	{ PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
+	/* MRFL SSP5 */
+	{ PCI_VDEVICE(INTEL, 0x1194), 0},
+	{},
+};
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	__devexit_p(intel_mid_ssp_spi_remove),
+	.suspend =	intel_mid_ssp_spi_suspend,
+	.resume =	intel_mid_ssp_spi_resume,
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
+
diff --git a/include/linux/spi/spi-intel-mid-ssp.h b/include/linux/spi/spi-intel-mid-ssp.h
new file mode 100644
index 0000000..1b90b75
--- /dev/null
+++ b/include/linux/spi/spi-intel-mid-ssp.h
@@ -0,0 +1,326 @@
+/*
+ *  Copyright (C) Intel 2009
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+
+#define PCI_MRST_DMAC1_ID	0x0814
+#define PCI_MDFL_DMAC1_ID	0x0827
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP   10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignment configuration from PCI config */
+#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
+#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
+#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* Driver's quirk flags */
+/* This workaround buffers data in the audio fabric SDRAM from     */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode.                                                  */
+#define QUIRKS_SRAM_ADDITIONAL_CPY	1
+/* If set the trailing bytes won't be handled by the DMA.           */
+/* Trailing byte feature not fully available.                       */
+#define QUIRKS_DMA_USE_NO_TRAIL		2
+/* If set, the driver will use PM_QOS to reduce the latency         */
+/* introduced by the deeper C-states which may produce over/under   */
+/* run issues. Must be used in slave mode. In master mode, the      */
+/* latency is not critical, but setting this workaround may         */
+/* improve the SPI throughput.                                      */
+#define QUIRKS_USE_PM_QOS		4
+/* This quirk is set on Moorestown                                  */
+#define QUIRKS_PLATFORM_MRST		8
+/* This quirk is set on Medfield                                    */
+#define QUIRKS_PLATFORM_MDFL		16
+/* If set, the driver will apply the bitbanging workaround needed   */
+/* to enable defective Langwell stepping A SSP. The defective SSP   */
+/* can be enabled only once, and should never be disabled.          */
+#define QUIRKS_BIT_BANGING		32
+/* If set, SPI is in slave clock mode                               */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
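+/* Generates read_<reg>()/write_<reg>() raw MMIO accessors at offset off */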
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD     4
+#define MAX_BITS_PER_WORD     32
+#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+
+DEFINE_SSP_REG(I2CCTRL, 0x00);
+DEFINE_SSP_REG(I2CDATA, 0x04);
+
+DEFINE_SSP_REG(GPLR1, 0x04);
+DEFINE_SSP_REG(GPDR1, 0x0c);
+DEFINE_SSP_REG(GPSR1, 0x14);
+DEFINE_SSP_REG(GPCR1, 0x1C);
+DEFINE_SSP_REG(GAFR1_U, 0x44);
+
+#define SYSCFG  0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR   SRAM_BASE_ADDR
+#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
+#define SSCR0_NCS   (1 << 21)           /* Network clock select */
+#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrun int mask */
+#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
+#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
+#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
+#define SSSR_BSY		(1 << 4)	/* SSP Busy */
+#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
+#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
+#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
+#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
+
+#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
+#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
+
+#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
+#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interrupt Enable */
+#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE         (1 << 23) /* Bit Count Error */
+#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC         (1 << 20) /* End Of Chain */
+#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+struct callback_param {
+	void *drv_context;
+	u32 direction;
+};
+
+struct ssp_driver_context {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;
+	void *ioaddr;
+	int irq;
+
+	/* I2C registers */
+	dma_addr_t I2C_paddr;
+	void *I2C_ioaddr;
+
+	/* SSP masks*/
+	u32 cr1_sig;
+	u32 cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+	/* PM_QOS request */
+	struct pm_qos_request pm_qos_req;
+
+	struct tasklet_struct poll_transfer;
+
+	spinlock_t lock;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	size_t len;
+	size_t len_dma_rx;
+	size_t len_dma_tx;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	bool dma_initialized;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	u8 n_bytes;
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan    *txchan;
+	struct dma_chan    *rxchan;
+	struct workqueue_struct *dma_wq;
+	struct work_struct complete_work;
+
+	u8 __iomem *virt_addr_sram_tx;
+	u8 __iomem *virt_addr_sram_rx;
+
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+	struct pci_dev *dmac1;
+
+	unsigned long quirks;
+	u32 rx_fifo_threshold;
+};
+
+struct chip_data {
+	u32 cr0;
+	u32 cr1;
+	u32 timeout;
+	u8 n_bytes;
+	u8 dma_enabled;
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+};
+
+
+enum intel_mid_ssp_spi_fifo_burst {
+	IMSS_FIFO_BURST_1,
+	IMSS_FIFO_BURST_4,
+	IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.controller_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	enum intel_mid_ssp_spi_fifo_burst burst_size;
+	u32 timeout;
+	u8 enable_loopback;
+	u8 dma_enabled;
+};
+
+
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+struct spi_dib_header {
+	u32       signature;
+	u32       length;
+	u8         rev;
+	u8         checksum;
+	u8         dib[0];
+} __packed;
+
+#endif /*INTEL_MID_SSP_SPI_H_*/
-- 
1.7.1





^ permalink raw reply related	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-11-21  2:16 [PATCH] SPI: SSP SPI Controller driver chao bi
@ 2012-11-21 12:08 ` Shubhrajyoti Datta
       [not found]   ` <CAM=Q2cvoEMScnCmfrhoAueZ8bfPCX90TxZmsSigfeRbGeXbzMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2012-11-21 12:14 ` Shubhrajyoti Datta
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 26+ messages in thread
From: Shubhrajyoti Datta @ 2012-11-21 12:08 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Wed, Nov 21, 2012 at 7:46 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

>
> This patch is to implement SSP SPI controller driver, which has been
> applied and
> validated on intel Moorestown & Medfield platform. The patch are
> originated by
> Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Sylvain Centelles <
> sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
> and to be further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Chen Jun
> <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> according to their integration & validation on
> Medfield platform.
>
> Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> ---
>  drivers/spi/Kconfig                   |    9 +
>  drivers/spi/Makefile                  |    1 +
>  drivers/spi/spi-intel-mid-ssp.c       | 1407
> +++++++++++++++++++++++++++++++++
>  include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++
>  4 files changed, 1743 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/spi/spi-intel-mid-ssp.c
>  create mode 100644 include/linux/spi/spi-intel-mid-ssp.h
>
> diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
> index 1acae35..8b4461b 100644
> --- a/drivers/spi/Kconfig
> +++ b/drivers/spi/Kconfig
> @@ -179,6 +179,15 @@ config SPI_IMX
>           This enables using the Freescale i.MX SPI controllers in master
>           mode.
>
> +config SPI_INTEL_MID_SSP
> +       tristate "SSP SPI controller driver for Intel MID platforms"
> +       depends on SPI_MASTER && INTEL_MID_DMAC
> +       help
> +         This is the unified SSP SPI master controller driver for
> +         the Intel MID platforms, handling Moorestown & Medfield,
> +         master clock mode.
> +         It supports Bulverde SSP core.
> +
>  config SPI_LM70_LLP
>         tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
>         depends on PARPORT && EXPERIMENTAL
> diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
> index c48df47..83f06d0 100644
> --- a/drivers/spi/Makefile
> +++ b/drivers/spi/Makefile
> @@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_ESPI)            += spi-fsl-espi.o
>  obj-$(CONFIG_SPI_FSL_SPI)              += spi-fsl-spi.o
>  obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
>  obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
> +obj-$(CONFIG_SPI_INTEL_MID_SSP)                += spi-intel-mid-ssp.o
>  obj-$(CONFIG_SPI_LM70_LLP)             += spi-lm70llp.o
>  obj-$(CONFIG_SPI_MPC512x_PSC)          += spi-mpc512x-psc.o
>  obj-$(CONFIG_SPI_MPC52xx_PSC)          += spi-mpc52xx-psc.o
> diff --git a/drivers/spi/spi-intel-mid-ssp.c
> b/drivers/spi/spi-intel-mid-ssp.c
> new file mode 100644
> index 0000000..8fca48f
> --- /dev/null
> +++ b/drivers/spi/spi-intel-mid-ssp.c
> @@ -0,0 +1,1407 @@
> +/*
> + * spi-intel-mid-ssp.c
> + * This driver supports Bulverde SSP core used on Intel MID platforms
> + * It supports SSP of Moorestown & Medfield platforms and handles clock
> + * slave & master modes.
> + *
> + * Copyright (c) 2010, Intel Corporation.
> + *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; if not, write to the Free Software Foundation, Inc.,
> + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> + *
> + */
> +
> +/*
> + * Note:
> + *
> + * Supports DMA and non-interrupt polled transfers.
> + *
> + */
> +
> +#include <linux/delay.h>
> +#include <linux/interrupt.h>
> +#include <linux/highmem.h>
> +#include <linux/pci.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/intel_mid_dma.h>
> +#include <linux/pm_qos.h>
> +#include <linux/module.h>
> +
> +#include <linux/spi/spi.h>
> +#include <linux/spi/spi-intel-mid-ssp.h>
> +
> +#define DRIVER_NAME "intel_mid_ssp_spi_unified"
> +
> +MODULE_AUTHOR("Ken Mills");
> +MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
> +MODULE_LICENSE("GPL");
> +
> +static const struct pci_device_id pci_ids[];
> +
> +#ifdef DUMP_RX
> +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> +{
> +       int tlen1 = (len < sz ? len : sz);
> +       int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> +       unsigned char *p;
> +       static char msg[MAX_SPI_TRANSFER_SIZE];
> +
> +       memset(msg, '\0', sizeof(msg));
> +       p = buf;
> +       while (p < buf + tlen1)
> +               sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +
> +       if (tlen2 > 0) {
> +               sprintf(msg, "%s .....", msg);
> +               p = (buf+len) - tlen2;
> +               while (p < buf + len)
> +                       sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +       }
> +
> +       dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
> +                  len-tlen2, len - 1, msg);
> +}
> +#endif
> +
> +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
> +{
> +       u32 sssr;
> +       sssr = read_SSSR(drv_context->ioaddr);
> +       if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> +               return 0;
> +       else
> +               return 1;
> +}
> +
> +static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
> +{
> +       return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
> +}
> +
> +static inline void disable_interface(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
> +}
> +
> +static inline void disable_triggers(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
> +}
> +
> +
> +static void flush(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       u32 i = 0;
> +
> +       /* If the transmit fifo is not empty, reset the interface. */
> +       if (!is_tx_fifo_empty(drv_context)) {
> +               dev_err(&drv_context->pdev->dev,
> +                               "TX FIFO not empty. Reset of SPI IF");
> +               disable_interface(drv_context);
> +               return;
> +       }
> +
> +       dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
> +       while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
> +               read_SSDR(reg);
> +               i++;
> +       }
> +       WARN(i > 0, "%d words flush occurred\n", i);
> +
> +       return;
> +}
> +
> +static int null_writer(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       u8 n_bytes = drv_context->n_bytes;
> +
> +       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +               || (drv_context->tx == drv_context->tx_end))
> +               return 0;
> +
> +       write_SSDR(0, reg);
> +       drv_context->tx += n_bytes;
> +
> +       return 1;
> +}
> +
> +static int null_reader(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       u8 n_bytes = drv_context->n_bytes;
> +
> +       while ((read_SSSR(reg) & SSSR_RNE)
> +               && (drv_context->rx < drv_context->rx_end)) {
> +               read_SSDR(reg);
> +               drv_context->rx += n_bytes;
> +       }
> +
> +       return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u8_writer(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +               || (drv_context->tx == drv_context->tx_end))
> +               return 0;
> +
> +       write_SSDR(*(u8 *)(drv_context->tx), reg);
> +       ++drv_context->tx;
> +
> +       return 1;
> +}
> +
> +static int u8_reader(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       while ((read_SSSR(reg) & SSSR_RNE)
> +               && (drv_context->rx < drv_context->rx_end)) {
> +               *(u8 *)(drv_context->rx) = read_SSDR(reg);
> +               ++drv_context->rx;
> +       }
> +
> +       return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u16_writer(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +               || (drv_context->tx == drv_context->tx_end))
> +               return 0;
> +
> +       write_SSDR(*(u16 *)(drv_context->tx), reg);
> +       drv_context->tx += 2;
> +
> +       return 1;
> +}
> +
> +static int u16_reader(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       while ((read_SSSR(reg) & SSSR_RNE)
> +               && (drv_context->rx < drv_context->rx_end)) {
> +               *(u16 *)(drv_context->rx) = read_SSDR(reg);
> +               drv_context->rx += 2;
> +       }
> +
> +       return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u32_writer(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +               || (drv_context->tx == drv_context->tx_end))
> +               return 0;
> +
> +       write_SSDR(*(u32 *)(drv_context->tx), reg);
> +       drv_context->tx += 4;
> +
> +       return 1;
> +}
> +
> +static int u32_reader(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       while ((read_SSSR(reg) & SSSR_RNE)
> +               && (drv_context->rx < drv_context->rx_end)) {
> +               *(u32 *)(drv_context->rx) = read_SSDR(reg);
> +               drv_context->rx += 4;
> +       }
> +
> +       return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static bool chan_filter(struct dma_chan *chan, void *param)
> +{
> +       struct ssp_driver_context *drv_context =
> +               (struct ssp_driver_context *)param;
> +       bool ret = false;
> +
> +       if (!drv_context->dmac1)
> +               return ret;
> +
> +       if (chan->device->dev == &drv_context->dmac1->dev)
> +               ret = true;
> +
> +       return ret;
> +}
> +
> +/**
> + * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
> + * @drv_context:       Pointer to the private driver context
> + */
> +static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
> +{
> +       struct device *dev = &drv_context->pdev->dev;
> +
> +       if (!drv_context->dma_mapped)
> +               return;
> +       dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
> +               PCI_DMA_FROMDEVICE);
> +       dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
> +               PCI_DMA_TODEVICE);
> +       drv_context->dma_mapped = 0;
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
> + * @arg:       Pointer to the data provided at callback registration
> + *
> + * This function is set as callback for both RX and TX DMA transfers. The
> + * RX or TX 'done' flag is set according to the direction of the ended
> + * transfer. Then, if both RX and TX flags are set, it means that the
> + * transfer job is completed.
> + */
> +static void intel_mid_ssp_spi_dma_done(void *arg)
> +{
> +       struct callback_param *cb_param = (struct callback_param *)arg;
> +       struct ssp_driver_context *drv_context = cb_param->drv_context;
> +       struct device *dev = &drv_context->pdev->dev;
> +       void *reg = drv_context->ioaddr;
> +
> +       if (cb_param->direction == TX_DIRECTION)
> +               drv_context->txdma_done = 1;
> +       else
> +               drv_context->rxdma_done = 1;
> +
> +       dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
> +               cb_param->direction, drv_context->rxdma_done,
> +               drv_context->txdma_done);
> +
> +       if (drv_context->txdma_done && drv_context->rxdma_done) {
> +               /* Clear Status Register */
> +               write_SSSR(drv_context->clear_sr, reg);
> +               dev_dbg(dev, "DMA done\n");
> +               /* Disable Triggers to DMA or to CPU*/
> +               disable_triggers(drv_context);
> +               unmap_dma_buffers(drv_context);
> +
> +               queue_work(drv_context->dma_wq, &drv_context->complete_work);
> +       }
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_init() - Initialize DMA
> + * @drv_context:       Pointer to the private driver context
> + *
> + * This function is called at driver setup phase to allocate DMA
> + * resources.
> + */
> +static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
> +{
> +       struct intel_mid_dma_slave *rxs, *txs;
> +       struct dma_slave_config *ds;
> +       dma_cap_mask_t mask;
> +       struct device *dev = &drv_context->pdev->dev;
> +       unsigned int device_id;
> +
> +       /* Configure RX channel parameters */
> +       rxs = &drv_context->dmas_rx;
> +       ds = &rxs->dma_slave;
> +
> +       ds->direction = DMA_FROM_DEVICE;
> +       rxs->hs_mode = LNW_DMA_HW_HS;
> +       rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
> +       ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
> +       ds->src_addr_width = drv_context->n_bytes;
> +
> +       /* Use a DMA burst according to the FIFO thresholds */
> +       if (drv_context->rx_fifo_threshold == 8) {
> +               ds->src_maxburst = 8;
> +               ds->dst_maxburst = 8;
> +       } else if (drv_context->rx_fifo_threshold == 4) {
> +               ds->src_maxburst = 4;
> +               ds->dst_maxburst = 4;
> +       } else {
> +               ds->src_maxburst = 1;
> +               ds->dst_maxburst = 1;
> +       }
> +
> +       /* Configure TX channel parameters */
> +       txs = &drv_context->dmas_tx;
> +       ds = &txs->dma_slave;
> +
> +       ds->direction = DMA_TO_DEVICE;
> +       txs->hs_mode = LNW_DMA_HW_HS;
> +       txs->cfg_mode = LNW_DMA_MEM_TO_PER;
> +       ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
> +       ds->dst_addr_width = drv_context->n_bytes;
> +
> +       /* Use a DMA burst according to the FIFO thresholds */
> +       if (drv_context->rx_fifo_threshold == 8) {
> +               ds->src_maxburst = 8;
> +               ds->dst_maxburst = 8;
> +       } else if (drv_context->rx_fifo_threshold == 4) {
> +               ds->src_maxburst = 4;
> +               ds->dst_maxburst = 4;
> +       } else {
> +               ds->src_maxburst = 1;
> +               ds->dst_maxburst = 1;
> +       }
> +
> +       /* Nothing more to do if already initialized */
> +       if (drv_context->dma_initialized)
> +               return;
> +
> +       /* Use DMAC1 */
> +       if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
> +               device_id = PCI_MRST_DMAC1_ID;
> +       else
> +               device_id = PCI_MDFL_DMAC1_ID;
> +
> +       drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
> +                                                       device_id, NULL);
> +
> +       if (!drv_context->dmac1) {
> +               dev_err(dev, "Can't find DMAC1");
> +               return;
> +       }
> +
> +       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
> +               drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
> +                               2 * MAX_SPI_TRANSFER_SIZE);
> +               if (drv_context->virt_addr_sram_rx)
> +                       drv_context->virt_addr_sram_tx =
> +                               drv_context->virt_addr_sram_rx +
> +                               MAX_SPI_TRANSFER_SIZE;
> +               else
> +                       dev_err(dev, "Virt_addr_sram_rx is null\n");
> +       }
> +
> +       /* 1. Allocate rx channel */
> +       dma_cap_zero(mask);
> +       dma_cap_set(DMA_MEMCPY, mask);
> +       dma_cap_set(DMA_SLAVE, mask);
> +
> +       drv_context->rxchan = dma_request_channel(mask, chan_filter,
> +               drv_context);
> +       if (!drv_context->rxchan)
> +               goto err_exit;
> +
> +       drv_context->rxchan->private = rxs;
> +
> +       /* 2. Allocate tx channel */
> +       dma_cap_set(DMA_SLAVE, mask);
> +       dma_cap_set(DMA_MEMCPY, mask);
> +
> +       drv_context->txchan = dma_request_channel(mask, chan_filter,
> +               drv_context);
> +
> +       if (!drv_context->txchan)
> +               goto free_rxchan;
> +       else
> +               drv_context->txchan->private = txs;
> +
> +       /* set the dma done bit to 1 */
> +       drv_context->txdma_done = 1;
> +       drv_context->rxdma_done = 1;
> +
> +       drv_context->tx_param.drv_context  = drv_context;
> +       drv_context->tx_param.direction = TX_DIRECTION;
> +       drv_context->rx_param.drv_context  = drv_context;
> +       drv_context->rx_param.direction = RX_DIRECTION;
> +
> +       drv_context->dma_initialized = 1;
> +
> +       return;
> +
> +free_rxchan:
> +       dma_release_channel(drv_context->rxchan);
> +err_exit:
> +       dev_err(dev, "Error : DMA Channel Not available\n");
> +
> +       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
> +               iounmap(drv_context->virt_addr_sram_rx);
> +
> +       pci_dev_put(drv_context->dmac1);
> +       return;
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_exit() - Release DMA resources
> + * @drv_context:       Pointer to the private driver context
> + */
> +static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
> +{
> +       dma_release_channel(drv_context->txchan);
> +       dma_release_channel(drv_context->rxchan);
> +
> +       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
> +               iounmap(drv_context->virt_addr_sram_rx);
> +
> +       pci_dev_put(drv_context->dmac1);
> +}
> +
> +/**
> + * dma_transfer() - Initiate a DMA transfer
> + * @drv_context:       Pointer to the private driver context
> + */
> +static void dma_transfer(struct ssp_driver_context *drv_context)
> +{
> +       dma_addr_t ssdr_addr;
> +       struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
> +       struct dma_chan *txchan, *rxchan;
> +       enum dma_ctrl_flags flag;
> +       struct device *dev = &drv_context->pdev->dev;
> +
> +       /* get Data Read/Write address */
> +       ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
> +
> +       if (drv_context->tx_dma)
> +               drv_context->txdma_done = 0;
> +
> +       if (drv_context->rx_dma)
> +               drv_context->rxdma_done = 0;
> +
> +       /* 2. prepare the RX dma transfer */
> +       txchan = drv_context->txchan;
> +       rxchan = drv_context->rxchan;
> +
> +       flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
> +
> +       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
> +               /* Since the DMA is configured to do 32bits access */
> +               /* to/from the DDR, the DMA transfer size must be  */
> +               /* a multiple of 4 bytes                           */
> +               drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
> +               drv_context->len_dma_tx = drv_context->len_dma_rx;
> +
> +               /* In Rx direction, TRAIL Bytes are handled by memcpy */
> +               if (drv_context->rx_dma &&
> +                       (drv_context->len_dma_rx >
> +                       drv_context->rx_fifo_threshold * drv_context->n_bytes))
> +                       drv_context->len_dma_rx =
> +                                       TRUNCATE(drv_context->len_dma_rx,
> +                                       drv_context->rx_fifo_threshold *
> +                                       drv_context->n_bytes);
> +               else if (!drv_context->rx_dma)
> +                       dev_err(dev, "ERROR : rx_dma is null\r\n");
> +       } else {
> +               /* TRAIL Bytes are handled by DMA */
> +               if (drv_context->rx_dma) {
> +                       drv_context->len_dma_rx = drv_context->len;
> +                       drv_context->len_dma_tx = drv_context->len;
> +               } else {
> +                       dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
> +               }
> +       }
> +
> +       rxdesc = rxchan->device->device_prep_dma_memcpy
> +               (rxchan,                                /* DMA Channel */
> +               drv_context->rx_dma,                    /* DAR */
> +               ssdr_addr,                              /* SAR */
> +               drv_context->len_dma_rx,                /* Data Length */
> +               flag);                                  /* Flag */
> +
> +       if (rxdesc) {
> +               rxdesc->callback = intel_mid_ssp_spi_dma_done;
> +               rxdesc->callback_param = &drv_context->rx_param;
> +       } else {
> +               dev_dbg(dev, "rxdesc is null! (len_dma_rx:%zd)\n",
> +                       drv_context->len_dma_rx);
> +               drv_context->rxdma_done = 1;
> +       }
> +
> +       /* 3. prepare the TX dma transfer */
> +       if (drv_context->tx_dma) {
> +               txdesc = txchan->device->device_prep_dma_memcpy
> +               (txchan,                                /* DMA Channel */
> +               ssdr_addr,                              /* DAR */
> +               drv_context->tx_dma,                    /* SAR */
> +               drv_context->len_dma_tx,                /* Data Length */
> +               flag);                                  /* Flag */
> +               if (txdesc) {
> +                       txdesc->callback = intel_mid_ssp_spi_dma_done;
> +                       txdesc->callback_param = &drv_context->tx_param;
> +               } else {
> +                       dev_dbg(dev, "txdesc is null! (len_dma_tx:%zd)\n",
> +                               drv_context->len_dma_tx);
> +                       drv_context->txdma_done = 1;
> +               }
> +       } else {
> +               dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
> +               return;
> +       }
> +
> +       dev_info(dev, "DMA transfer len:%zd len_dma_tx:%zd len_dma_rx:%zd\n",
> +               drv_context->len, drv_context->len_dma_tx,
> +               drv_context->len_dma_rx);
> +
> +       if (rxdesc || txdesc) {
> +               if (rxdesc) {
> +                       dev_dbg(dev, "Firing DMA RX channel\n");
> +                       rxdesc->tx_submit(rxdesc);
> +               }
> +               if (txdesc) {
> +                       dev_dbg(dev, "Firing DMA TX channel\n");
> +                       txdesc->tx_submit(txdesc);
> +               }
> +       } else {
> +               struct callback_param cb_param;
> +               cb_param.drv_context = drv_context;
> +               dev_dbg(dev, "Bypassing DMA transfer\n");
> +               intel_mid_ssp_spi_dma_done(&cb_param);
> +       }
> +}
> +
> +/**
> + * map_dma_buffers() - Map DMA buffer before a transfer
> + * @drv_context:       Pointer to the private driver context
> + */
> +static int map_dma_buffers(struct ssp_driver_context *drv_context)
> +{
> +       struct device *dev = &drv_context->pdev->dev;
> +
> +       if (unlikely(drv_context->dma_mapped)) {
> +               dev_err(dev, "ERROR : DMA buffers already mapped\n");
> +               return 0;
> +       }
> +       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
> +               /* Copy drv_context->tx into sram_tx */
> +               memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
> +                       drv_context->len);
> +#ifdef DUMP_RX
> +               dump_trailer(&drv_context->pdev->dev, drv_context->tx,
> +                       drv_context->len, 16);
> +#endif
> +               drv_context->rx_dma = SRAM_RX_ADDR;
> +               drv_context->tx_dma = SRAM_TX_ADDR;
> +       } else {
> +               /* no QUIRKS_SRAM_ADDITIONAL_CPY */
> +               if (unlikely(drv_context->dma_mapped))
> +                       return 1;
> +
> +               drv_context->tx_dma =
> +                       dma_map_single(dev, drv_context->tx, drv_context->len,
> +                               PCI_DMA_TODEVICE);
> +               if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
> +                       dev_err(dev, "ERROR : tx dma mapping failed\n");
> +                       return 0;
> +               }
> +
> +               drv_context->rx_dma =
> +                       dma_map_single(dev, drv_context->rx, drv_context->len,
> +                               PCI_DMA_FROMDEVICE);
> +               if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
> +                       dma_unmap_single(dev, drv_context->tx_dma,
> +                               drv_context->len, DMA_TO_DEVICE);
> +                       dev_err(dev, "ERROR : rx dma mapping failed\n");
> +                       return 0;
> +               }
> +       }
> +       return 1;
> +}
> +
> +/**
> + * drain_trail() - Handle trailing bytes of a transfer
> + * @drv_context:       Pointer to the private driver context
> + *
> + * This function handles the trailing bytes of a transfer for the case
> + * they are not handled by the DMA.
> + */
> +void drain_trail(struct ssp_driver_context *drv_context)
> +{
> +       struct device *dev = &drv_context->pdev->dev;
> +       void *reg = drv_context->ioaddr;
> +
> +       if (drv_context->len != drv_context->len_dma_rx) {
> +               dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
> +                       read_SSSR(reg));
> +               drv_context->rx += drv_context->len_dma_rx;
> +               drv_context->tx += drv_context->len_dma_tx;
> +
> +               while ((drv_context->tx != drv_context->tx_end) ||
> +                       (drv_context->rx != drv_context->rx_end)) {
> +                       drv_context->read(drv_context);
> +                       drv_context->write(drv_context);
> +               }
> +       }
> +}
> +
> +/**
> + * sram_to_ddr_cpy() - Copy data from Langwell SRAM to DDR
> + * @drv_context:       Pointer to the private driver context
> + */
> +static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
> +{
> +       u32 length = drv_context->len;
> +
> +       if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> +               && (drv_context->len > drv_context->rx_fifo_threshold *
> +               drv_context->n_bytes))
> +               length = TRUNCATE(drv_context->len,
> +                       drv_context->rx_fifo_threshold * drv_context->n_bytes);
> +
> +       memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
> +}
> +
> +static void int_transfer_complete(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       struct spi_message *msg;
> +       struct device *dev = &drv_context->pdev->dev;
> +
> +       if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
> +               pm_qos_update_request(&drv_context->pm_qos_req,
> +                                       PM_QOS_DEFAULT_VALUE);
> +
> +       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
> +               sram_to_ddr_cpy(drv_context);
> +
> +       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
> +               drain_trail(drv_context);
> +       else
> +               /* Stop getting Time Outs */
> +               write_SSTO(0, reg);
> +
> +       drv_context->cur_msg->status = 0;
> +       drv_context->cur_msg->actual_length = drv_context->len;
> +
> +#ifdef DUMP_RX
> +       dump_trailer(dev, drv_context->rx, drv_context->len, 16);
> +#endif
> +
> +       dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
> +       msg = drv_context->cur_msg;
> +       if (likely(msg->complete))
> +               msg->complete(msg->context);
> +}
> +
> +static void int_transfer_complete_work(struct work_struct *work)
> +{
> +       struct ssp_driver_context *drv_context = container_of(work,
> +                               struct ssp_driver_context, complete_work);
> +
> +       int_transfer_complete(drv_context);
> +}
> +
> +static void poll_transfer_complete(struct ssp_driver_context *drv_context)
> +{
> +       struct spi_message *msg;
> +
> +       /* Update total bytes transferred; return count reflects actual bytes read */
> +       drv_context->cur_msg->actual_length +=
> +               drv_context->len - (drv_context->rx_end - drv_context->rx);
> +
> +       drv_context->cur_msg->status = 0;
> +
> +       msg = drv_context->cur_msg;
> +       if (likely(msg->complete))
> +               msg->complete(msg->context);
> +}
> +
> +/**
> + * ssp_int() - Interrupt handler
> + * @irq
> + * @dev_id
> + *
> + * The SSP interrupt is not used for transfers which are handled by
> + * DMA or polling: only under/over run are caught to detect
> + * broken transfers.
> + */
> +static irqreturn_t ssp_int(int irq, void *dev_id)
> +{
> +       struct ssp_driver_context *drv_context = dev_id;
> +       void *reg = drv_context->ioaddr;
> +       struct device *dev = &drv_context->pdev->dev;
> +       u32 status = read_SSSR(reg);
> +
> +       /* It should never be our interrupt since SSP will */
> +       /* only trigger an interrupt for under/over run.   */
> +       if (likely(!(status & drv_context->mask_sr)))
> +               return IRQ_NONE;
> +
> +       if (status & SSSR_ROR || status & SSSR_TUR) {
> +               dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
> +               WARN_ON(1);
> +               if (status & SSSR_ROR)
> +                       dev_err(dev, "we have Overrun\n");
> +               if (status & SSSR_TUR)
> +                       dev_err(dev, "we have Underrun\n");
> +       }
> +
> +       /* We can fall here when not using DMA mode */
> +       if (!drv_context->cur_msg) {
> +               disable_interface(drv_context);
> +               disable_triggers(drv_context);
> +       }
> +       /* clear status register */
> +       write_SSSR(drv_context->clear_sr, reg);
> +       return IRQ_HANDLED;
> +}
> +
> +static void poll_transfer(unsigned long data)
> +{
> +       struct ssp_driver_context *drv_context =
> +               (struct ssp_driver_context *)data;
> +
> +       if (drv_context->tx)
> +               while (drv_context->tx != drv_context->tx_end) {
> +                       drv_context->write(drv_context);
> +                       drv_context->read(drv_context);
> +               }
> +
> +       while (!drv_context->read(drv_context))
> +               cpu_relax();
> +
> +       poll_transfer_complete(drv_context);
> +}
> +
> +/**
> + * start_bitbanging() - Clock synchronization by bit banging
> + * @drv_context:       Pointer to private driver context
> + *
> + * This clock synchronization will be removed as soon as it is
> + * handled by the SCU.
> + */
> +static void start_bitbanging(struct ssp_driver_context *drv_context)
> +{
> +       u32 sssr;
> +       u32 count = 0;
> +       u32 cr0;
> +       void *i2c_reg = drv_context->I2C_ioaddr;
> +       struct device *dev = &drv_context->pdev->dev;
> +       void *reg = drv_context->ioaddr;
> +       struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
> +       cr0 = chip->cr0;
> +
> +       dev_warn(dev, "In %s : Starting bit banging\n",\
> +               __func__);
> +       if (read_SSSR(reg) & SSP_NOT_SYNC)
> +               dev_warn(dev, "SSP clock desynchronized.\n");
> +       if (!(read_SSCR0(reg) & SSCR0_SSE))
> +               dev_warn(dev, "in SSCR0, SSP disabled.\n");
> +
> +       dev_dbg(dev, "SSP not ready, start CLK sync\n");
> +
> +       write_SSCR0(cr0 & ~SSCR0_SSE, reg);
> +       write_SSPSP(0x02010007, reg);
> +
> +       write_SSTO(chip->timeout, reg);
> +       write_SSCR0(cr0, reg);
> +
> +       /*
> +       *  This routine uses the DFx block to override the SSP inputs
> +       *  and outputs allowing us to bit bang SSPSCLK. On Langwell,
> +       *  we have to generate the clock to clear busy.
> +       */
> +       write_I2CDATA(0x3, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070034, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CDATA(0x00000099, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070038, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       sssr = read_SSSR(reg);
> +
> +       /* Bit bang the clock until CSS clears */
> +       while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
> +               write_I2CDATA(0x2, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CCTRL(0x01070034, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CDATA(0x3, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CCTRL(0x01070034, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               sssr = read_SSSR(reg);
> +               count++;
> +       }
> +       if (count >= MAX_BITBANGING_LOOP)
> +               dev_err(dev,
> +                       "ERROR in %s : infinite loop on bit banging. Aborting\n",
> +                       __func__);
> +
> +       dev_dbg(dev, "---Bit bang count=%d\n", count);
> +
> +       write_I2CDATA(0x0, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070038, i2c_reg);
> +}
> +
> +static unsigned int ssp_get_clk_div(int speed)
> +{
> +       return max(100000000 / speed, 4) - 1;
> +}
> +
> +/**
> + * transfer() - Start a SPI transfer
> + * @spi:       Pointer to the spi_device struct
> + * @msg:       Pointer to the spi_message struct
> + */
> +static int transfer(struct spi_device *spi, struct spi_message *msg)
> +{
> +       struct ssp_driver_context *drv_context = \
> +       spi_master_get_devdata(spi->master);
> +       struct chip_data *chip = NULL;
> +       struct spi_transfer *transfer = NULL;
> +       void *reg = drv_context->ioaddr;
> +       u32 cr1;
> +       struct device *dev = &drv_context->pdev->dev;
> +       chip = spi_get_ctldata(msg->spi);
> +
> +       msg->actual_length = 0;
> +       msg->status = -EINPROGRESS;
> +       drv_context->cur_msg = msg;
> +
> +       /* We handle only one transfer message since the protocol module has to
> +          control the out of band signaling. */
> +       transfer = list_entry(msg->transfers.next,
> +                                       struct spi_transfer,
> +                                       transfer_list);
> +
> +       /* Check transfer length */
> +       if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
> +               (transfer->len == 0))) {
> +               dev_warn(dev, "transfer length null or greater than %d\n",
> +                       MAX_SPI_TRANSFER_SIZE);
> +               dev_warn(dev, "length = %d\n", transfer->len);
> +               msg->status = -EINVAL;
> +
> +               if (msg->complete)
> +                       msg->complete(msg->context);
> +
> +               return 0;
> +       }
> +
> +       /* Flush any remaining data (in case of failed previous transfer) */
> +       flush(drv_context);
> +
> +       drv_context->tx  = (void *)transfer->tx_buf;
> +       drv_context->rx  = (void *)transfer->rx_buf;
> +       drv_context->len = transfer->len;
> +       drv_context->write = chip->write;
> +       drv_context->read = chip->read;
> +
> +       if (likely(chip->dma_enabled)) {
> +               drv_context->dma_mapped = map_dma_buffers(drv_context);
> +               if (unlikely(!drv_context->dma_mapped))
> +                       return 0;
> +       } else {
> +               drv_context->write = drv_context->tx ?
> +                       chip->write : null_writer;
> +               drv_context->read  = drv_context->rx ?
> +                       chip->read : null_reader;
> +       }
> +       drv_context->tx_end = drv_context->tx + transfer->len;
> +       drv_context->rx_end = drv_context->rx + transfer->len;
> +
> +       /* Clear status  */
> +       write_SSSR(drv_context->clear_sr, reg);
> +
> +       /* setup the CR1 control register */
> +       cr1 = chip->cr1 | drv_context->cr1_sig;
> +
> +       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
> +               /* in case of len smaller than burst size, adjust the RX     */
> +               /* threshold. All other cases will use the default threshold */
> +               /* value. The RX fifo threshold must be aligned with the DMA */
> +               /* RX transfer size, which may be limited to a multiple of 4 */
> +               /* bytes due to 32bits DDR access.                           */
> +               if  (drv_context->len / drv_context->n_bytes <=
> +                       drv_context->rx_fifo_threshold) {
> +                       u32 rx_fifo_threshold;
> +
> +                       rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
> +                               drv_context->n_bytes;
> +                       cr1 &= ~(SSCR1_RFT);
> +                       cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
> +                                       & SSCR1_RFT;
> +               } else {
> +                       write_SSTO(chip->timeout, reg);
> +               }
> +       }
> +
> +       dev_dbg(dev,
> +               "transfer len:%zd  n_bytes:%d  cr0:%x  cr1:%x",
> +               drv_context->len, drv_context->n_bytes, chip->cr0, cr1);
> +
> +       /* first set CR1 */
> +       write_SSCR1(cr1, reg);
> +
> +       /* Do bitbanging only if SSP not-enabled or not-synchronized */
> +       if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
> +               (!(read_SSCR0(reg) & SSCR0_SSE))) &&
> +               (drv_context->quirks & QUIRKS_BIT_BANGING))) {
> +                       start_bitbanging(drv_context);
> +       } else {
> +               /* (re)start the SSP */
> +               write_SSCR0(chip->cr0, reg);
> +       }
> +
> +       if (likely(chip->dma_enabled)) {
> +               if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
> +                       pm_qos_update_request(&drv_context->pm_qos_req,
> +                               MIN_EXIT_LATENCY);
> +               dma_transfer(drv_context);
> +       } else {
> +               tasklet_schedule(&drv_context->poll_transfer);
> +       }
> +
> +       return 0;
> +}
> +
> +/**
> + * setup() - Driver setup procedure
> + * @spi:       Pointer to the spi_device struct
> + */
> +static int setup(struct spi_device *spi)
> +{
> +       struct intel_mid_ssp_spi_chip *chip_info = NULL;
> +       struct chip_data *chip;
> +       struct ssp_driver_context *drv_context =
> +               spi_master_get_devdata(spi->master);
> +       u32 tx_fifo_threshold;
> +       u32 burst_size;
> +       u32 clk_div;
> +
> +       if (!spi->bits_per_word)
> +               spi->bits_per_word = DFLT_BITS_PER_WORD;
> +
> +       if ((spi->bits_per_word < MIN_BITS_PER_WORD
> +               || spi->bits_per_word > MAX_BITS_PER_WORD))
> +               return -EINVAL;
> +
> +       chip = spi_get_ctldata(spi);
> +       if (!chip) {
> +               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
> +               if (!chip) {
> +                       dev_err(&spi->dev,
> +                       "failed setup: can't allocate chip data\n");
> +                       return -ENOMEM;
> +               }
> +       }
> +       chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
> +               spi->bits_per_word - 16 : spi->bits_per_word)
> +                       | SSCR0_SSE
> +                       | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
> +
> +       /* protocol drivers may change the chip settings, so...  */
> +       /* if chip_info exists, use it                           */
> +       chip_info = spi->controller_data;
> +
> +       /* chip_info isn't always needed */
> +       chip->cr1 = 0;
> +       if (chip_info) {
> +               burst_size = chip_info->burst_size;
> +               if (burst_size > IMSS_FIFO_BURST_8)
> +                       burst_size = DFLT_FIFO_BURST_SIZE;
> +
> +               chip->timeout = chip_info->timeout;
> +
> +               if (chip_info->enable_loopback)
> +                       chip->cr1 |= SSCR1_LBM;
>
> Who sets the enable_loopback?


<snip>


> +/* spi_board_info.controller_data for SPI slave devices,
> + * copied to spi_device.platform_data ... mostly for dma tuning
> + */
> +struct intel_mid_ssp_spi_chip {
> +       enum intel_mid_ssp_spi_fifo_burst burst_size;
> +       u32 timeout;
> +       u8 enable_loopback;
> +       u8 dma_enabled;
> +};
> +
> +
> +#define SPI_DIB_NAME_LEN  16
> +#define SPI_DIB_SPEC_INFO_LEN      10
> +
> +struct spi_dib_header {
> +       u32       signature;
> +       u32       length;
> +       u8         rev;
> +       u8         checksum;
> +       u8         dib[0];
> +} __packed;
> +
> +#endif /*INTEL_MID_SSP_SPI_H_*/
> --
> 1.7.1
>
>
>
>
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-11-21  2:16 [PATCH] SPI: SSP SPI Controller driver chao bi
  2012-11-21 12:08 ` Shubhrajyoti Datta
@ 2012-11-21 12:14 ` Shubhrajyoti Datta
       [not found]   ` <CAM=Q2cu6ReS-6sJxdacnw=FYGdoFed9bM1gA6yFEtmVjs8KQTA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2012-12-06 12:38 ` Grant Likely
  2012-12-17 11:23 ` Linus Walleij
  3 siblings, 1 reply; 26+ messages in thread
From: Shubhrajyoti Datta @ 2012-11-21 12:14 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Wed, Nov 21, 2012 at 7:46 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> +       /* Create the PM_QOS request */
> +       if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> +               pm_qos_add_request(&drv_context->pm_qos_req,
> +               PM_QOS_CPU_DMA_LATENCY,
> +               PM_QOS_DEFAULT_VALUE);
>

What happens if the flag is not set? If it is absolutely necessary for the
driver, it should not be a configurable option.

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]   ` <CAM=Q2cu6ReS-6sJxdacnw=FYGdoFed9bM1gA6yFEtmVjs8KQTA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2012-11-21 12:26     ` Alan Cox
       [not found]       ` <20121121122630.13fc2087-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
  0 siblings, 1 reply; 26+ messages in thread
From: Alan Cox @ 2012-11-21 12:26 UTC (permalink / raw)
  To: Shubhrajyoti Datta
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, chao bi,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

On Wed, 21 Nov 2012 17:44:21 +0530
Shubhrajyoti Datta <omaplinuxkernel-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> On Wed, Nov 21, 2012 at 7:46 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> 
> > +       /* Create the PM_QOS request */
> > +       if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> > +               pm_qos_add_request(&drv_context->pm_qos_req,
> > +               PM_QOS_CPU_DMA_LATENCY,
> > +               PM_QOS_DEFAULT_VALUE);
> >
> 
> What happens if the flag is not set? If it is absolutely necessary for
> the driver, it should not be a configurable option.

If you read through the code it's set only when the device is
Moorestown/Oaktrail based and only in slave mode. It is not necessary
in other configurations.

Alan
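
Pulling the relevant pieces of the quoted driver together (a summary sketch of
the existing code paths, not new code): the request only exists when the quirk
is set, is tightened while a DMA transfer is in flight, and is relaxed again on
completion.

/* probe: only Moorestown/Oaktrail slave-mode configurations set the quirk */
if (drv_context->quirks & QUIRKS_USE_PM_QOS)
        pm_qos_add_request(&drv_context->pm_qos_req,
                PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

/* transfer(): keep CPUs out of deep idle states while the DMA runs */
if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
        pm_qos_update_request(&drv_context->pm_qos_req, MIN_EXIT_LATENCY);

/* int_transfer_complete(): drop the latency constraint again */
if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
        pm_qos_update_request(&drv_context->pm_qos_req, PM_QOS_DEFAULT_VALUE);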


^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH] SPI: SSP SPI Controller driver
       [not found]   ` <CAM=Q2cvoEMScnCmfrhoAueZ8bfPCX90TxZmsSigfeRbGeXbzMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2012-11-22  3:26     ` Bi, Chao
       [not found]       ` <253F3AA5ECB4EC43A2CA0147545F67F2102B5D40-0J0gbvR4kTiiAffOGbnezLfspsVTdybXVpNB7YpNyf8@public.gmane.org>
  0 siblings, 1 reply; 26+ messages in thread
From: Bi, Chao @ 2012-11-22  3:26 UTC (permalink / raw)
  To: Shubhrajyoti Datta
  Cc: Chen, Jun D, Mills, Ken K, Centelles, Sylvain,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

+               if (chip_info->enable_loopback)
+                       chip->cr1 |= SSCR1_LBM;
Who sets the enable_loopback?

[Chao] 'enable_loopback' can be configured by the SPI protocol driver before it sets up the SPI controller. It is generally not set by default because it is only used for test and validation.
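
For illustration, a validation setup would request loopback through the same
controller_data path before setup() runs; a minimal sketch, assuming the
structures from this patch and a hypothetical test device entry:

/* Loop TX back to RX inside the SSP; setup() turns this into SSCR1_LBM. */
static struct intel_mid_ssp_spi_chip loopback_ctrl_data = {
        .burst_size      = IMSS_FIFO_BURST_4,
        .enable_loopback = 1,
        .dma_enabled     = 0,   /* polled transfers are enough for a loopback test */
};

/* Referenced from the corresponding spi_board_info entry, e.g.:
 *      .modalias        = "spidev",
 *      .controller_data = &loopback_ctrl_data,
 * so that setup() picks it up via spi->controller_data.
 */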

Thanks
From: Shubhrajyoti Datta [mailto:omaplinuxkernel-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org]
Sent: Wednesday, November 21, 2012 8:08 PM
To: Bi, Chao
Cc: grant.likely-s3s/WqlpOiPyB63q8FvJNQ@public.gmane.org; Chen, Jun D; spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org; alan-VuQAYsv1563Yd54FQh9/CA@public.gmane.org; Mills, Ken K; Centelles, Sylvain
Subject: Re: [PATCH] SPI: SSP SPI Controller driver


On Wed, Nov 21, 2012 at 7:46 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>> wrote:

This patch is to implement SSP SPI controller driver, which has been applied and
validated on intel Moorestown & Medfield platform. The patch are originated by
Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>> and Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>,
and to be further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>> and Chen Jun
<jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>> according to their integration & validation on Medfield platform.

Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
---
 drivers/spi/Kconfig                   |    9 +
 drivers/spi/Makefile                  |    1 +
 drivers/spi/spi-intel-mid-ssp.c       | 1407 +++++++++++++++++++++++++++++++++
 include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++
 4 files changed, 1743 insertions(+), 0 deletions(-)
 create mode 100644 drivers/spi/spi-intel-mid-ssp.c
 create mode 100644 include/linux/spi/spi-intel-mid-ssp.h

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae35..8b4461b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -179,6 +179,15 @@ config SPI_IMX
          This enables using the Freescale i.MX SPI controllers in master
          mode.

+config SPI_INTEL_MID_SSP
+       tristate "SSP SPI controller driver for Intel MID platforms"
+       depends on SPI_MASTER && INTEL_MID_DMAC
+       help
+         This is the unified SSP SPI master controller driver for
+         the Intel MID platforms, handling Moorestown & Medfield,
+         master clock mode.
+         It supports Bulverde SSP core.
+
 config SPI_LM70_LLP
        tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
        depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47..83f06d0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_ESPI)            += spi-fsl-espi.o
 obj-$(CONFIG_SPI_FSL_SPI)              += spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)                 += spi-gpio.o
 obj-$(CONFIG_SPI_IMX)                  += spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)                += spi-intel-mid-ssp.o
 obj-$(CONFIG_SPI_LM70_LLP)             += spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)          += spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)          += spi-mpc52xx-psc.o
diff --git a/drivers/spi/spi-intel-mid-ssp.c b/drivers/spi/spi-intel-mid-ssp.c
new file mode 100644
index 0000000..8fca48f
--- /dev/null
+++ b/drivers/spi/spi-intel-mid-ssp.c
@@ -0,0 +1,1407 @@
+/*
+ * spi-intel-mid-ssp.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
+ *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org<mailto:sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/module.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-intel-mid-ssp.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pci_ids[];
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+       int tlen1 = (len < sz ? len : sz);
+       int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+       unsigned char *p;
+       static char msg[MAX_SPI_TRANSFER_SIZE];
+
+       memset(msg, '\0', sizeof(msg));
+       p = buf;
+       while (p < buf + tlen1)
+               sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+
+       if (tlen2 > 0) {
+               sprintf(msg, "%s .....", msg);
+               p = (buf+len) - tlen2;
+               while (p < buf + len)
+                       sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+       }
+
+       dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+                  len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+       u32 sssr;
+       sssr = read_SSSR(drv_context->ioaddr);
+       if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+               return 0;
+       else
+               return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+       return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       u32 i = 0;
+
+       /* If the transmit fifo is not empty, reset the interface. */
+       if (!is_tx_fifo_empty(drv_context)) {
+               dev_err(&drv_context->pdev->dev,
+                               "TX FIFO not empty. Reset of SPI IF");
+               disable_interface(drv_context);
+               return;
+       }
+
+       dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+       while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
+               read_SSDR(reg);
+               i++;
+       }
+       WARN(i > 0, "%d words flush occurred\n", i);
+
+       return;
+}
+
+static int null_writer(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       u8 n_bytes = drv_context->n_bytes;
+
+       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+               || (drv_context->tx == drv_context->tx_end))
+               return 0;
+
+       write_SSDR(0, reg);
+       drv_context->tx += n_bytes;
+
+       return 1;
+}
+
+static int null_reader(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       u8 n_bytes = drv_context->n_bytes;
+
+       while ((read_SSSR(reg) & SSSR_RNE)
+               && (drv_context->rx < drv_context->rx_end)) {
+               read_SSDR(reg);
+               drv_context->rx += n_bytes;
+       }
+
+       return drv_context->rx == drv_context->rx_end;
+}
+
+static int u8_writer(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+               || (drv_context->tx == drv_context->tx_end))
+               return 0;
+
+       write_SSDR(*(u8 *)(drv_context->tx), reg);
+       ++drv_context->tx;
+
+       return 1;
+}
+
+static int u8_reader(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       while ((read_SSSR(reg) & SSSR_RNE)
+               && (drv_context->rx < drv_context->rx_end)) {
+               *(u8 *)(drv_context->rx) = read_SSDR(reg);
+               ++drv_context->rx;
+       }
+
+       return drv_context->rx == drv_context->rx_end;
+}
+
+static int u16_writer(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+               || (drv_context->tx == drv_context->tx_end))
+               return 0;
+
+       write_SSDR(*(u16 *)(drv_context->tx), reg);
+       drv_context->tx += 2;
+
+       return 1;
+}
+
+static int u16_reader(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       while ((read_SSSR(reg) & SSSR_RNE)
+               && (drv_context->rx < drv_context->rx_end)) {
+               *(u16 *)(drv_context->rx) = read_SSDR(reg);
+               drv_context->rx += 2;
+       }
+
+       return drv_context->rx == drv_context->rx_end;
+}
+
+static int u32_writer(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+               || (drv_context->tx == drv_context->tx_end))
+               return 0;
+
+       write_SSDR(*(u32 *)(drv_context->tx), reg);
+       drv_context->tx += 4;
+
+       return 1;
+}
+
+static int u32_reader(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       while ((read_SSSR(reg) & SSSR_RNE)
+               && (drv_context->rx < drv_context->rx_end)) {
+               *(u32 *)(drv_context->rx) = read_SSDR(reg);
+               drv_context->rx += 4;
+       }
+
+       return drv_context->rx == drv_context->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+       struct ssp_driver_context *drv_context =
+               (struct ssp_driver_context *)param;
+       bool ret = false;
+
+       if (!drv_context->dmac1)
+               return ret;
+
+       if (chan->device->dev == &drv_context->dmac1->dev)
+               ret = true;
+
+       return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_context:       Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
+{
+       struct device *dev = &drv_context->pdev->dev;
+
+       if (!drv_context->dma_mapped)
+               return;
+       dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
+               PCI_DMA_FROMDEVICE);
+       dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
+               PCI_DMA_TODEVICE);
+       drv_context->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:       Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+       struct callback_param *cb_param = (struct callback_param *)arg;
+       struct ssp_driver_context *drv_context = cb_param->drv_context;
+       struct device *dev = &drv_context->pdev->dev;
+       void *reg = drv_context->ioaddr;
+
+       if (cb_param->direction == TX_DIRECTION)
+               drv_context->txdma_done = 1;
+       else
+               drv_context->rxdma_done = 1;
+
+       dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+               cb_param->direction, drv_context->rxdma_done,
+               drv_context->txdma_done);
+
+       if (drv_context->txdma_done && drv_context->rxdma_done) {
+               /* Clear Status Register */
+               write_SSSR(drv_context->clear_sr, reg);
+               dev_dbg(dev, "DMA done\n");
+               /* Disable Triggers to DMA or to CPU*/
+               disable_triggers(drv_context);
+               unmap_dma_buffers(drv_context);
+
+               queue_work(drv_context->dma_wq, &drv_context->complete_work);
+       }
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_context:       Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
+{
+       struct intel_mid_dma_slave *rxs, *txs;
+       struct dma_slave_config *ds;
+       dma_cap_mask_t mask;
+       struct device *dev = &drv_context->pdev->dev;
+       unsigned int device_id;
+
+       /* Configure RX channel parameters */
+       rxs = &drv_context->dmas_rx;
+       ds = &rxs->dma_slave;
+
+       ds->direction = DMA_FROM_DEVICE;
+       rxs->hs_mode = LNW_DMA_HW_HS;
+       rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+       ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       ds->src_addr_width = drv_context->n_bytes;
+
+       /* Use a DMA burst according to the FIFO thresholds */
+       if (drv_context->rx_fifo_threshold == 8) {
+               ds->src_maxburst = 8;
+               ds->dst_maxburst = 8;
+       } else if (drv_context->rx_fifo_threshold == 4) {
+               ds->src_maxburst = 4;
+               ds->dst_maxburst = 4;
+       } else {
+               ds->src_maxburst = 1;
+               ds->dst_maxburst = 1;
+       }
+
+       /* Configure TX channel parameters */
+       txs = &drv_context->dmas_tx;
+       ds = &txs->dma_slave;
+
+       ds->direction = DMA_TO_DEVICE;
+       txs->hs_mode = LNW_DMA_HW_HS;
+       txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+       ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       ds->dst_addr_width = drv_context->n_bytes;
+
+       /* Use a DMA burst according to the FIFO thresholds */
+       if (drv_context->rx_fifo_threshold == 8) {
+               ds->src_maxburst = 8;
+               ds->dst_maxburst = 8;
+       } else if (drv_context->rx_fifo_threshold == 4) {
+               ds->src_maxburst = 4;
+               ds->dst_maxburst = 4;
+       } else {
+               ds->src_maxburst = 1;
+               ds->dst_maxburst = 1;
+       }
+
+       /* Nothing more to do if already initialized */
+       if (drv_context->dma_initialized)
+               return;
+
+       /* Use DMAC1 */
+       if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
+               device_id = PCI_MRST_DMAC1_ID;
+       else
+               device_id = PCI_MDFL_DMAC1_ID;
+
+       drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                                       device_id, NULL);
+
+       if (!drv_context->dmac1) {
+               dev_err(dev, "Can't find DMAC1");
+               return;
+       }
+
+       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+               drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+                               2 * MAX_SPI_TRANSFER_SIZE);
+               if (drv_context->virt_addr_sram_rx)
+                       drv_context->virt_addr_sram_tx =
+                               drv_context->virt_addr_sram_rx +
+                               MAX_SPI_TRANSFER_SIZE;
+               else
+                       dev_err(dev, "Virt_addr_sram_rx is null\n");
+       }
+
+       /* 1. Allocate rx channel */
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       dma_cap_set(DMA_SLAVE, mask);
+
+       drv_context->rxchan = dma_request_channel(mask, chan_filter,
+               drv_context);
+       if (!drv_context->rxchan)
+               goto err_exit;
+
+       drv_context->rxchan->private = rxs;
+
+       /* 2. Allocate tx channel */
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+
+       drv_context->txchan = dma_request_channel(mask, chan_filter,
+               drv_context);
+
+       if (!drv_context->txchan)
+               goto free_rxchan;
+       else
+               drv_context->txchan->private = txs;
+
+       /* set the dma done bit to 1 */
+       drv_context->txdma_done = 1;
+       drv_context->rxdma_done = 1;
+
+       drv_context->tx_param.drv_context  = drv_context;
+       drv_context->tx_param.direction = TX_DIRECTION;
+       drv_context->rx_param.drv_context  = drv_context;
+       drv_context->rx_param.direction = RX_DIRECTION;
+
+       drv_context->dma_initialized = 1;
+
+       return;
+
+free_rxchan:
+       dma_release_channel(drv_context->rxchan);
+err_exit:
+       dev_err(dev, "Error : DMA Channel Not available\n");
+
+       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+               iounmap(drv_context->virt_addr_sram_rx);
+
+       pci_dev_put(drv_context->dmac1);
+       return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_context:       Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
+{
+       dma_release_channel(drv_context->txchan);
+       dma_release_channel(drv_context->rxchan);
+
+       if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+               iounmap(drv_context->virt_addr_sram_rx);
+
+       pci_dev_put(drv_context->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @drv_context:       Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_driver_context *drv_context)
+{
+       dma_addr_t ssdr_addr;
+       struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+       struct dma_chan *txchan, *rxchan;
+       enum dma_ctrl_flags flag;
+       struct device *dev = &drv_context->pdev->dev;
+
+       /* get Data Read/Write address */
+       ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
+
+       if (drv_context->tx_dma)
+               drv_context->txdma_done = 0;
+
+       if (drv_context->rx_dma)
+               drv_context->rxdma_done = 0;
+
+       /* 2. prepare the RX dma transfer */
+       txchan = drv_context->txchan;
+       rxchan = drv_context->rxchan;
+
+       flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+               /* Since the DMA is configured to do 32bits access */
+               /* to/from the DDR, the DMA transfer size must be  */
+               /* a multiple of 4 bytes                           */
+               drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
+               drv_context->len_dma_tx = drv_context->len_dma_rx;
+
+               /* In Rx direction, TRAIL Bytes are handled by memcpy */
+               if (drv_context->rx_dma &&
+                       (drv_context->len_dma_rx >
+                       drv_context->rx_fifo_threshold * drv_context->n_bytes))
+                       drv_context->len_dma_rx =
+                                       TRUNCATE(drv_context->len_dma_rx,
+                                       drv_context->rx_fifo_threshold *
+                                       drv_context->n_bytes);
+               else if (!drv_context->rx_dma)
+                       dev_err(dev, "ERROR : rx_dma is null\r\n");
+       } else {
+               /* TRAIL Bytes are handled by DMA */
+               if (drv_context->rx_dma) {
+                       drv_context->len_dma_rx = drv_context->len;
+                       drv_context->len_dma_tx = drv_context->len;
+               } else {
+                       dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
+               }
+       }
+
+       rxdesc = rxchan->device->device_prep_dma_memcpy
+               (rxchan,                                /* DMA Channel */
+               drv_context->rx_dma,                    /* DAR */
+               ssdr_addr,                              /* SAR */
+               drv_context->len_dma_rx,                /* Data Length */
+               flag);                                  /* Flag */
+
+       if (rxdesc) {
+               rxdesc->callback = intel_mid_ssp_spi_dma_done;
+               rxdesc->callback_param = &drv_context->rx_param;
+       } else {
+               dev_dbg(dev, "rxdesc is null! (len_dma_rx:%zd)\n",
+                       drv_context->len_dma_rx);
+               drv_context->rxdma_done = 1;
+       }
+
+       /* 3. prepare the TX dma transfer */
+       if (drv_context->tx_dma) {
+               txdesc = txchan->device->device_prep_dma_memcpy
+               (txchan,                                /* DMA Channel */
+               ssdr_addr,                              /* DAR */
+               drv_context->tx_dma,                    /* SAR */
+               drv_context->len_dma_tx,                /* Data Length */
+               flag);                                  /* Flag */
+               if (txdesc) {
+                       txdesc->callback = intel_mid_ssp_spi_dma_done;
+                       txdesc->callback_param = &drv_context->tx_param;
+               } else {
+                       dev_dbg(dev, "txdesc is null! (len_dma_tx:%zd)\n",
+                               drv_context->len_dma_tx);
+                       drv_context->txdma_done = 1;
+               }
+       } else {
+               dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
+               return;
+       }
+
+       dev_info(dev, "DMA transfer len:%zd len_dma_tx:%zd len_dma_rx:%zd\n",
+               drv_context->len, drv_context->len_dma_tx,
+               drv_context->len_dma_rx);
+
+       if (rxdesc || txdesc) {
+               if (rxdesc) {
+                       dev_dbg(dev, "Firing DMA RX channel\n");
+                       rxdesc->tx_submit(rxdesc);
+               }
+               if (txdesc) {
+                       dev_dbg(dev, "Firing DMA TX channel\n");
+                       txdesc->tx_submit(txdesc);
+               }
+       } else {
+               struct callback_param cb_param;
+               cb_param.drv_context = drv_context;
+               dev_dbg(dev, "Bypassing DMA transfer\n");
+               intel_mid_ssp_spi_dma_done(&cb_param);
+       }
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @drv_context:       Pointer to the private driver context
+ */
+static int map_dma_buffers(struct ssp_driver_context *drv_context)
+{
+       struct device *dev = &drv_context->pdev->dev;
+
+       if (unlikely(drv_context->dma_mapped)) {
+               dev_err(dev, "ERROR : DMA buffers already mapped\n");
+               return 0;
+       }
+       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+               /* Copy drv_context->tx into sram_tx */
+               memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
+                       drv_context->len);
+#ifdef DUMP_RX
+               dump_trailer(&drv_context->pdev->dev, drv_context->tx,
+                       drv_context->len, 16);
+#endif
+               drv_context->rx_dma = SRAM_RX_ADDR;
+               drv_context->tx_dma = SRAM_TX_ADDR;
+       } else {
+               /* no QUIRKS_SRAM_ADDITIONAL_CPY */
+               if (unlikely(drv_context->dma_mapped))
+                       return 1;
+
+               drv_context->tx_dma =
+                       dma_map_single(dev, drv_context->tx, drv_context->len,
+                               PCI_DMA_TODEVICE);
+               if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
+                       dev_err(dev, "ERROR : tx dma mapping failed\n");
+                       return 0;
+               }
+
+               drv_context->rx_dma =
+                       dma_map_single(dev, drv_context->rx, drv_context->len,
+                               PCI_DMA_FROMDEVICE);
+               if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
+                       dma_unmap_single(dev, drv_context->tx_dma,
+                               drv_context->len, DMA_TO_DEVICE);
+                       dev_err(dev, "ERROR : rx dma mapping failed\n");
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @drv_context:       Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_driver_context *drv_context)
+{
+       struct device *dev = &drv_context->pdev->dev;
+       void *reg = drv_context->ioaddr;
+
+       if (drv_context->len != drv_context->len_dma_rx) {
+               dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+                       read_SSSR(reg));
+               drv_context->rx += drv_context->len_dma_rx;
+               drv_context->tx += drv_context->len_dma_tx;
+
+               while ((drv_context->tx != drv_context->tx_end) ||
+                       (drv_context->rx != drv_context->rx_end)) {
+                       drv_context->read(drv_context);
+                       drv_context->write(drv_context);
+               }
+       }
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SRAM to DDR
+ * @drv_context:       Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
+{
+       u32 length = drv_context->len;
+
+       if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+               && (drv_context->len > drv_context->rx_fifo_threshold *
+               drv_context->n_bytes))
+               length = TRUNCATE(drv_context->len,
+                       drv_context->rx_fifo_threshold * drv_context->n_bytes);
+
+       memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_driver_context *drv_context)
+{
+       void *reg = drv_context->ioaddr;
+       struct spi_message *msg;
+       struct device *dev = &drv_context->pdev->dev;
+
+       if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+               pm_qos_update_request(&drv_context->pm_qos_req,
+                                       PM_QOS_DEFAULT_VALUE);
+
+       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+               sram_to_ddr_cpy(drv_context);
+
+       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+               drain_trail(drv_context);
+       else
+               /* Stop getting Time Outs */
+               write_SSTO(0, reg);
+
+       drv_context->cur_msg->status = 0;
+       drv_context->cur_msg->actual_length = drv_context->len;
+
+#ifdef DUMP_RX
+       dump_trailer(dev, drv_context->rx, drv_context->len, 16);
+#endif
+
+       dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+       msg = drv_context->cur_msg;
+       if (likely(msg->complete))
+               msg->complete(msg->context);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+       struct ssp_driver_context *drv_context = container_of(work,
+                               struct ssp_driver_context, complete_work);
+
+       int_transfer_complete(drv_context);
+}
+
+static void poll_transfer_complete(struct ssp_driver_context *drv_context)
+{
+       struct spi_message *msg;
+
+       /* Update the total bytes transferred; count the actual bytes read */
+       drv_context->cur_msg->actual_length +=
+               drv_context->len - (drv_context->rx_end - drv_context->rx);
+
+       drv_context->cur_msg->status = 0;
+
+       msg = drv_context->cur_msg;
+       if (likely(msg->complete))
+               msg->complete(msg->context);
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq:       The interrupt number
+ * @dev_id:    Pointer to the private driver context
+ *
+ * The SSP interrupt is not used for transfers, which are handled by
+ * DMA or polling: only under/over runs are caught to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+       struct ssp_driver_context *drv_context = dev_id;
+       void *reg = drv_context->ioaddr;
+       struct device *dev = &drv_context->pdev->dev;
+       u32 status = read_SSSR(reg);
+
+       /* It should never be our interrupt since the SSP will */
+       /* only trigger an interrupt for under/over run.        */
+       if (likely(!(status & drv_context->mask_sr)))
+               return IRQ_NONE;
+
+       if (status & SSSR_ROR || status & SSSR_TUR) {
+               dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
+               WARN_ON(1);
+               if (status & SSSR_ROR)
+                       dev_err(dev, "we have Overrun\n");
+               if (status & SSSR_TUR)
+                       dev_err(dev, "we have Underrun\n");
+       }
+
+       /* We can fall here when not using DMA mode */
+       if (!drv_context->cur_msg) {
+               disable_interface(drv_context);
+               disable_triggers(drv_context);
+       }
+       /* clear status register */
+       write_SSSR(drv_context->clear_sr, reg);
+       return IRQ_HANDLED;
+}
+
+static void poll_transfer(unsigned long data)
+{
+       struct ssp_driver_context *drv_context =
+               (struct ssp_driver_context *)data;
+
+       if (drv_context->tx)
+               while (drv_context->tx != drv_context->tx_end) {
+                       drv_context->write(drv_context);
+                       drv_context->read(drv_context);
+               }
+
+       while (!drv_context->read(drv_context))
+               cpu_relax();
+
+       poll_transfer_complete(drv_context);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @drv_context:       Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_driver_context *drv_context)
+{
+       u32 sssr;
+       u32 count = 0;
+       u32 cr0;
+       void *i2c_reg = drv_context->I2C_ioaddr;
+       struct device *dev = &drv_context->pdev->dev;
+       void *reg = drv_context->ioaddr;
+       struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
+       cr0 = chip->cr0;
+
+       dev_warn(dev, "In %s : Starting bit banging\n",
+               __func__);
+       if (read_SSSR(reg) & SSP_NOT_SYNC)
+               dev_warn(dev, "SSP clock desynchronized.\n");
+       if (!(read_SSCR0(reg) & SSCR0_SSE))
+               dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+       dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+       write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+       write_SSPSP(0x02010007, reg);
+
+       write_SSTO(chip->timeout, reg);
+       write_SSCR0(cr0, reg);
+
+       /*
+       *  This routine uses the DFx block to override the SSP inputs
+       *  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+       *  we have to generate the clock to clear busy.
+       */
+       write_I2CDATA(0x3, i2c_reg);
+       udelay(I2C_ACCESS_USDELAY);
+       write_I2CCTRL(0x01070034, i2c_reg);
+       udelay(I2C_ACCESS_USDELAY);
+       write_I2CDATA(0x00000099, i2c_reg);
+       udelay(I2C_ACCESS_USDELAY);
+       write_I2CCTRL(0x01070038, i2c_reg);
+       udelay(I2C_ACCESS_USDELAY);
+       sssr = read_SSSR(reg);
+
+       /* Bit bang the clock until CSS clears */
+       while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+               write_I2CDATA(0x2, i2c_reg);
+               udelay(I2C_ACCESS_USDELAY);
+               write_I2CCTRL(0x01070034, i2c_reg);
+               udelay(I2C_ACCESS_USDELAY);
+               write_I2CDATA(0x3, i2c_reg);
+               udelay(I2C_ACCESS_USDELAY);
+               write_I2CCTRL(0x01070034, i2c_reg);
+               udelay(I2C_ACCESS_USDELAY);
+               sssr = read_SSSR(reg);
+               count++;
+       }
+       if (count >= MAX_BITBANGING_LOOP)
+               dev_err(dev,
+                       "ERROR in %s : infinite loop on bit banging. Aborting\n",
+                       __func__);
+
+       dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+       write_I2CDATA(0x0, i2c_reg);
+       udelay(I2C_ACCESS_USDELAY);
+       write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+static unsigned int ssp_get_clk_div(int speed)
+{
+       return max(100000000 / speed, 4) - 1;
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:       Pointer to the spi_device struct
+ * @msg:       Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+       struct ssp_driver_context *drv_context =
+               spi_master_get_devdata(spi->master);
+       struct chip_data *chip = NULL;
+       struct spi_transfer *transfer = NULL;
+       void *reg = drv_context->ioaddr;
+       u32 cr1;
+       struct device *dev = &drv_context->pdev->dev;
+       chip = spi_get_ctldata(msg->spi);
+
+       msg->actual_length = 0;
+       msg->status = -EINPROGRESS;
+       drv_context->cur_msg = msg;
+
+       /* We handle only one transfer message since the protocol module has to
+          control the out of band signaling. */
+       transfer = list_entry(msg->transfers.next,
+                                       struct spi_transfer,
+                                       transfer_list);
+
+       /* Check transfer length */
+       if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+               (transfer->len == 0))) {
+               dev_warn(dev, "transfer length null or greater than %d\n",
+                       MAX_SPI_TRANSFER_SIZE);
+               dev_warn(dev, "length = %d\n", transfer->len);
+               msg->status = -EINVAL;
+
+               if (msg->complete)
+                       msg->complete(msg->context);
+
+               return 0;
+       }
+
+       /* Flush any remaining data (in case of failed previous transfer) */
+       flush(drv_context);
+
+       drv_context->tx  = (void *)transfer->tx_buf;
+       drv_context->rx  = (void *)transfer->rx_buf;
+       drv_context->len = transfer->len;
+       drv_context->write = chip->write;
+       drv_context->read = chip->read;
+
+       if (likely(chip->dma_enabled)) {
+               drv_context->dma_mapped = map_dma_buffers(drv_context);
+               if (unlikely(!drv_context->dma_mapped))
+                       return 0;
+       } else {
+               drv_context->write = drv_context->tx ?
+                       chip->write : null_writer;
+               drv_context->read  = drv_context->rx ?
+                       chip->read : null_reader;
+       }
+       drv_context->tx_end = drv_context->tx + transfer->len;
+       drv_context->rx_end = drv_context->rx + transfer->len;
+
+       /* Clear status  */
+       write_SSSR(drv_context->clear_sr, reg);
+
+       /* setup the CR1 control register */
+       cr1 = chip->cr1 | drv_context->cr1_sig;
+
+       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+               /* in case of len smaller than burst size, adjust the RX     */
+               /* threshold. All other cases will use the default threshold */
+               /* value. The RX fifo threshold must be aligned with the DMA */
+               /* RX transfer size, which may be limited to a multiple of 4 */
+               /* bytes due to 32bits DDR access.                           */
+               if  (drv_context->len / drv_context->n_bytes <=
+                       drv_context->rx_fifo_threshold) {
+                       u32 rx_fifo_threshold;
+
+                       rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
+                               drv_context->n_bytes;
+                       cr1 &= ~(SSCR1_RFT);
+                       cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
+                                       & SSCR1_RFT;
+               } else {
+                       write_SSTO(chip->timeout, reg);
+               }
+       }
+
+       dev_dbg(dev,
+               "transfer len:%zd  n_bytes:%d  cr0:%x  cr1:%x",
+               drv_context->len, drv_context->n_bytes, chip->cr0, cr1);
+
+       /* first set CR1 */
+       write_SSCR1(cr1, reg);
+
+       /* Do bitbanging only if SSP not-enabled or not-synchronized */
+       if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+               (!(read_SSCR0(reg) & SSCR0_SSE))) &&
+               (drv_context->quirks & QUIRKS_BIT_BANGING))) {
+                       start_bitbanging(drv_context);
+       } else {
+               /* (re)start the SSP */
+               write_SSCR0(chip->cr0, reg);
+       }
+
+       if (likely(chip->dma_enabled)) {
+               if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+                       pm_qos_update_request(&drv_context->pm_qos_req,
+                               MIN_EXIT_LATENCY);
+               dma_transfer(drv_context);
+       } else {
+               tasklet_schedule(&drv_context->poll_transfer);
+       }
+
+       return 0;
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:       Pointer to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+       struct intel_mid_ssp_spi_chip *chip_info = NULL;
+       struct chip_data *chip;
+       struct ssp_driver_context *drv_context =
+               spi_master_get_devdata(spi->master);
+       u32 tx_fifo_threshold;
+       u32 burst_size;
+       u32 clk_div;
+
+       if (!spi->bits_per_word)
+               spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+       if ((spi->bits_per_word < MIN_BITS_PER_WORD
+               || spi->bits_per_word > MAX_BITS_PER_WORD))
+               return -EINVAL;
+
+       chip = spi_get_ctldata(spi);
+       if (!chip) {
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+               if (!chip) {
+                       dev_err(&spi->dev,
+                       "failed setup: can't allocate chip data\n");
+                       return -ENOMEM;
+               }
+       }
+       chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+               spi->bits_per_word - 16 : spi->bits_per_word)
+                       | SSCR0_SSE
+                       | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+       /* protocol drivers may change the chip settings, so...  */
+       /* if chip_info exists, use it                           */
+       chip_info = spi->controller_data;
+
+       /* chip_info isn't always needed */
+       chip->cr1 = 0;
+       if (chip_info) {
+               burst_size = chip_info->burst_size;
+               if (burst_size > IMSS_FIFO_BURST_8)
+                       burst_size = DFLT_FIFO_BURST_SIZE;
+
+               chip->timeout = chip_info->timeout;
+
+               if (chip_info->enable_loopback)
+                       chip->cr1 |= SSCR1_LBM;
Who sets the enable_loopback?


<snip>

+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+       enum intel_mid_ssp_spi_fifo_burst burst_size;
+       u32 timeout;
+       u8 enable_loopback;
+       u8 dma_enabled;
+};
+
+
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+struct spi_dib_header {
+       u32       signature;
+       u32       length;
+       u8         rev;
+       u8         checksum;
+       u8         dib[0];
+} __packed;
+
+#endif /*INTEL_MID_SSP_SPI_H_*/
--
1.7.1





^ permalink raw reply related	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]       ` <253F3AA5ECB4EC43A2CA0147545F67F2102B5D40-0J0gbvR4kTiiAffOGbnezLfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2012-11-22  6:54         ` Shubhrajyoti Datta
       [not found]           ` <CAM=Q2cszn_OoTyYiUVSj3NvpxJq+wSUnMJVcwWOdV2EzDviLVw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 26+ messages in thread
From: Shubhrajyoti Datta @ 2012-11-22  6:54 UTC (permalink / raw)
  To: Bi, Chao
  Cc: Chen, Jun D, Mills, Ken K, Centelles, Sylvain,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Thu, Nov 22, 2012 at 8:56 AM, Bi, Chao <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

>               if (chip_info->enable_loopback)
> +                       chip->cr1 |= SSCR1_LBM;
>
> Who sets the enable_loopback?
>
> [Chao] 'enable_loopback' could be configured by the SPI protocol driver
> before it sets up the SPI controller. Generally it is not set by default
> because it's used for test and validation.
>

Should it not then depend on the (SPI mode) SPI_LOOP bit?

Or am I missing something?

>
> Thanks
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]       ` <20121121122630.13fc2087-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
@ 2012-11-22  7:01         ` Shubhrajyoti Datta
       [not found]           ` <CAM=Q2cuCZni2DyzDux-E5H4-djgNrUURTYJ+f=_oMBeJE7eGMw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 26+ messages in thread
From: Shubhrajyoti Datta @ 2012-11-22  7:01 UTC (permalink / raw)
  To: Alan Cox
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, chao bi,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

On Wed, Nov 21, 2012 at 5:56 PM, Alan Cox <alan-VuQAYsv1563Yd54FQh9/CA@public.gmane.org> wrote:

> On Wed, 21 Nov 2012 17:44:21 +0530
> Shubhrajyoti Datta <omaplinuxkernel-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
>
> > On Wed, Nov 21, 2012 at 7:46 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> >
> > > +       /* Create the PM_QOS request */
> > > +       if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> > > +               pm_qos_add_request(&drv_context->pm_qos_req,
> > > +               PM_QOS_CPU_DMA_LATENCY,
> > > +               PM_QOS_DEFAULT_VALUE);
> > >
> >
> > What happens if the flag is not set? If it is absolutely necessary for
> > the driver, it should not be a
> > configurable option.
>
> If you read through the code it's set only when the device is
> Moorestown/Oaktrail based and only in slave mode. It is not necessary
> in other configurations.
>

That's what I was trying to understand.

If I am not wrong, the latency is time related.
Why do only some platforms/modes need it? Also, the value is not speed
dependent.

My doubt is that the time taken for the DMA will be longer at lower speeds,
so the latency constraint could be relaxed.

Also, the SPI core today doesn't have slave mode support; that's a different
discussion altogether, maybe we can leave it for now.




>
> Alan
>

^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH] SPI: SSP SPI Controller driver
       [not found]           ` <CAM=Q2cszn_OoTyYiUVSj3NvpxJq+wSUnMJVcwWOdV2EzDviLVw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2012-11-22  8:13             ` Bi, Chao
  0 siblings, 0 replies; 26+ messages in thread
From: Bi, Chao @ 2012-11-22  8:13 UTC (permalink / raw)
  To: Shubhrajyoti Datta
  Cc: Chen, Jun D, Mills, Ken K, Centelles, Sylvain,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA


On Thu, Nov 22, 2012 at 8:56 AM, Bi, Chao <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
              if (chip_info->enable_loopback)
+                       chip->cr1 |= SSCR1_LBM;
Who sets the enable_loopback?

[Chao] 'enable_loopback' could be configured by the SPI protocol driver before it sets up the SPI controller. Generally it is not set by default because it's used for test and validation.

Should it not then depend on the (SPI mode) SPI_LOOP bit?

Or am I missing something?

[Chao] I think it's up to the protocol driver: if the protocol driver chooses to configure it depending on the (SPI mode) SPI_LOOP bit, then it should set spi_device.controller_data so that 'enable_loopback' corresponds to SPI_LOOP;
anyhow, the SSP controller driver only judges SPI loopback through spi_device.controller_data, which may be changed by the protocol driver.

in spi-intel-mid-ssp.c:

/* protocol drivers may change the chip settings, so...  */
         /* if chip_info exists, use it                           */
         chip_info = spi->controller_data;
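
If we did go the SPI_LOOP route, a rough sketch of what setup() could check
instead might be (untested, and it assumes SPI_LOOP is also added to
master->mode_bits in probe):

	/* hypothetical: derive loopback from the standard SPI mode bits */
	chip->cr1 = 0;
	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;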

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]           ` <CAM=Q2cuCZni2DyzDux-E5H4-djgNrUURTYJ+f=_oMBeJE7eGMw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2012-11-22 11:04             ` Alan Cox
  0 siblings, 0 replies; 26+ messages in thread
From: Alan Cox @ 2012-11-22 11:04 UTC (permalink / raw)
  To: Shubhrajyoti Datta
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, chao bi,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

> That's what I was trying to understand.
> 
> If I am not wrong, the latency is time related.
> Why do only some platforms/modes need it? Also, the value is not speed
> dependent.

Because the problem was fixed in the later devices.

> Also, the SPI core today doesn't have slave mode support; that's a
> different discussion altogether, maybe we can leave it for now.

I'd rather we kept the support in the driver ready for that.

Alan


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-11-21  2:16 [PATCH] SPI: SSP SPI Controller driver chao bi
  2012-11-21 12:08 ` Shubhrajyoti Datta
  2012-11-21 12:14 ` Shubhrajyoti Datta
@ 2012-12-06 12:38 ` Grant Likely
  2012-12-06 14:19   ` Alan Cox
                     ` (2 more replies)
  2012-12-17 11:23 ` Linus Walleij
  3 siblings, 3 replies; 26+ messages in thread
From: Grant Likely @ 2012-12-06 12:38 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> 
> This patch is to implement SSP SPI controller driver, which has been applied and
> validated on intel Moorestown & Medfield platform. The patch are originated by
> Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
> and to be further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Chen Jun
> <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> according to their integration & validation on Medfield platform.
> 
> Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>

Hi Chao,

Thanks for the patch, comments below...

> ---
>  drivers/spi/Kconfig                   |    9 +
>  drivers/spi/Makefile                  |    1 +
>  drivers/spi/spi-intel-mid-ssp.c       | 1407 +++++++++++++++++++++++++++++++++
>  include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++

Most (if not all) of this header file looks like it needs to be moved
into the .c file. Any symbol that is only used by the driver's .c file
(usually, anything that isn't platform_data) belongs in the .c

>  4 files changed, 1743 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/spi/spi-intel-mid-ssp.c
>  create mode 100644 include/linux/spi/spi-intel-mid-ssp.h
> 
> diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
> index 1acae35..8b4461b 100644
> --- a/drivers/spi/Kconfig
> +++ b/drivers/spi/Kconfig
> @@ -179,6 +179,15 @@ config SPI_IMX
>  	  This enables using the Freescale i.MX SPI controllers in master
>  	  mode.
>  
> +config SPI_INTEL_MID_SSP
> +	tristate "SSP SPI controller driver for Intel MID platforms"
> +	depends on SPI_MASTER && INTEL_MID_DMAC
> +	help
> +	  This is the unified SSP SPI master controller driver for
> +	  the Intel MID platforms, handling Moorestown & Medfield,
> +	  master clock mode.
> +	  It supports Bulverde SSP core.
> +

I think I've asked this question before, but I can't remember if I've
gotten an answer. How is this different from the designware spi
controller that is already in the tree for medfield and moorestown MID
platforms? (drivers/spi/spi-dw-mid.c).

>  config SPI_LM70_LLP
>  	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
>  	depends on PARPORT && EXPERIMENTAL
> diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
> index c48df47..83f06d0 100644
> --- a/drivers/spi/Makefile
> +++ b/drivers/spi/Makefile
> @@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_ESPI)		+= spi-fsl-espi.o
>  obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
>  obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
>  obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
> +obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= spi-intel-mid-ssp.o
>  obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
>  obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
>  obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
> diff --git a/drivers/spi/spi-intel-mid-ssp.c b/drivers/spi/spi-intel-mid-ssp.c
> new file mode 100644
> index 0000000..8fca48f
> --- /dev/null
> +++ b/drivers/spi/spi-intel-mid-ssp.c
> @@ -0,0 +1,1407 @@
> +/*
> + * spi-intel-mid-ssp.c
> + * This driver supports Bulverde SSP core used on Intel MID platforms
> + * It supports SSP of Moorestown & Medfield platforms and handles clock
> + * slave & master modes.
> + *
> + * Copyright (c) 2010, Intel Corporation.
> + *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; if not, write to the Free Software Foundation, Inc.,
> + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> + *
> + */
> +
> +/*
> + * Note:
> + *
> + * Supports DMA and non-interrupt polled transfers.
> + *
> + */
> +
> +#include <linux/delay.h>
> +#include <linux/interrupt.h>
> +#include <linux/highmem.h>
> +#include <linux/pci.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/intel_mid_dma.h>
> +#include <linux/pm_qos.h>
> +#include <linux/module.h>
> +
> +#include <linux/spi/spi.h>
> +#include <linux/spi/spi-intel-mid-ssp.h>
> +
> +#define DRIVER_NAME "intel_mid_ssp_spi_unified"

This string is used exactly once. Drop the #define.

> +
> +MODULE_AUTHOR("Ken Mills");
> +MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
> +MODULE_LICENSE("GPL");
> +
> +static const struct pci_device_id pci_ids[];

If you move the pci_ids table up to this point then the forward
declaration can be eliminated.

Also, use a driver-specific prefix on all symbols, even if they are
static. It makes it a lot easier to navigate code when the symbol names
match the driver and it avoids any possibility of conflict with the
global namespace. Something like "midssp_" or "midssp_spi_".

So, this symbol would be midssp_spi_pci_ids[], and all the static
functions below should be renamed.

> +
> +#ifdef DUMP_RX

Nit: Since this is debug code, rename the define to something like
DEBUG_DUMP_RX.

> +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> +{
> +	int tlen1 = (len < sz ? len : sz);
> +	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> +	unsigned char *p;
> +	static char msg[MAX_SPI_TRANSFER_SIZE];

Is this size a limitation of the hardware, or of the driver?

> +
> +	memset(msg, '\0', sizeof(msg));
> +	p = buf;
> +	while (p < buf + tlen1)
> +		sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +
> +	if (tlen2 > 0) {
> +		sprintf(msg, "%s .....", msg);
> +		p = (buf+len) - tlen2;
> +		while (p < buf + len)
> +			sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +	}

This looks like it will overrun the msg buffer. It adds 2 bytes for
every byte of data. It's also kind of sketchy code to sprintf into the
same buffer you're reading from.

You could avoid all these problems by using print_hex_dump() instead.

> +
> +	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
> +		   len-tlen2, len - 1, msg);
> +}
> +#endif
> +
> +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)

u32 ==> bool

> +{
> +	u32 sssr;
> +	sssr = read_SSSR(drv_context->ioaddr);
> +	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> +		return 0;
> +	else
> +		return 1;

or simply: return (sssr & (SSR_TFL_MASK || SSSR_TNF)) != 0;


... Okay, so I just went looking for the read_SSSR() function because I
wanted to know how it was defined. I just discovered that this driver is
the same as drivers/spi/spi-pxa2xx.c with a PCI front end bolted on.

I'm not keen on having two separate drivers for the same logic block.

> +/**
> + * intel_mid_ssp_spi_probe() - Driver probe procedure
> + * @pdev:	Pointer to the pci_dev struct
> + * @ent:	Pointer to the pci_device_id struct
> + */
> +static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
> +	const struct pci_device_id *ent)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct spi_master *master;
> +	struct ssp_driver_context *drv_context = 0;
> +	int status;
> +	u32 iolen = 0;
> +	u8 ssp_cfg;
> +	int pos;
> +	void __iomem *syscfg_ioaddr;
> +	unsigned long syscfg;
> +
> +	/* Check if the SSP we are probed for has been allocated */
> +	/* to operate as SPI. This information is retreived from */
> +	/* the field adid of the Vendor-Specific PCI capability  */
> +	/* which is used as a configuration register.            */
> +	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
> +	if (pos > 0) {
> +		pci_read_config_byte(pdev,
> +			pos + VNDR_CAPABILITY_ADID_OFFSET,
> +			&ssp_cfg);
> +	} else {
> +		dev_info(dev, "No Vendor Specific PCI capability\n");
> +		goto err_abort_probe;
> +	}
> +
> +	if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
> +		dev_info(dev, "Unsupported SSP mode (%02xh)\n",
> +			ssp_cfg);
> +		goto err_abort_probe;
> +	}
> +
> +	dev_info(dev, "found PCI SSP controller(ID: %04xh:%04xh cfg: %02xh)\n",
> +		pdev->vendor, pdev->device, ssp_cfg);
> +
> +	status = pci_enable_device(pdev);
> +	if (status)
> +		return status;
> +
> +	/* Allocate Slave with space for drv_context and null dma buffer */
> +	master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
> +
> +	if (!master) {
> +		dev_err(dev, "cannot alloc spi_slave\n");
> +		status = -ENOMEM;
> +		goto err_free_0;
> +	}
> +
> +	drv_context = spi_master_get_devdata(master);
> +	drv_context->master = master;
> +
> +	drv_context->pdev = pdev;
> +	drv_context->quirks = ent->driver_data;
> +
> +	/* Set platform & configuration quirks */
> +	if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
> +		/* Apply bit banging workarround on MRST */
> +		drv_context->quirks |= QUIRKS_BIT_BANGING;
> +		/* MRST slave mode workarrounds */
> +		if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +			drv_context->quirks |=
> +				QUIRKS_USE_PM_QOS |
> +				QUIRKS_SRAM_ADDITIONAL_CPY;
> +	}
> +	drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
> +	if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +		drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
> +
> +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> +	master->num_chipselect = 1;
> +	master->cleanup = cleanup;
> +	master->setup = setup;
> +	master->transfer = transfer;
> +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);

Workqueue management is integrated into the core spi infrastructure now.
SPI drivers should no longer be creating their own workqueues.

Instead, replace the ->transfer hook with prepare_transfer_hardware(),
unprepare_transfer_hardware() and transfer_one_message(). See
Documentation/spi/spi-summary for details.

> +static int __init intel_mid_ssp_spi_init(void)
> +{
> +	return pci_register_driver(&intel_mid_ssp_spi_driver);
> +}
> +
> +late_initcall(intel_mid_ssp_spi_init);

Why late_initcall()? module_init() should be sufficient. Or better yet
replace the init and exit functions with module_pci_driver()

> diff --git a/include/linux/spi/spi-intel-mid-ssp.h b/include/linux/spi/spi-intel-mid-ssp.h
> new file mode 100644
> index 0000000..1b90b75
> --- /dev/null
> +++ b/include/linux/spi/spi-intel-mid-ssp.h

As mentioned above, most if not all of the stuff in this file belongs in the .c.

g.


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-06 12:38 ` Grant Likely
@ 2012-12-06 14:19   ` Alan Cox
       [not found]     ` <20121206141938.0100f06f-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
  2012-12-11  2:00   ` chao bi
  2012-12-11  8:58   ` chao bi
  2 siblings, 1 reply; 26+ messages in thread
From: Alan Cox @ 2012-12-06 14:19 UTC (permalink / raw)
  To: Grant Likely
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w, chao bi,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

> I think I've asked this question before, but I can't remember if I've
> gotten an answer. How is this different from the designware spi
> controller that is already in the tree for medfield and moorestown MID
> platforms? (drivers/spi/spi-dw-mid.c).

Different devices.


> ... Okay, so I just went looking for the read_SSSR() function because
> I wanted to know how it was defined. I just discovered that this
> driver is the same as drivers/spi/spi-pxa2xx.c with a PCI front end
> bolted on.

Quite possible - I wasn't aware of that, but they may well come from
the same origin.
 
> > +late_initcall(intel_mid_ssp_spi_init);
> 
> Why late_initcall()? module_init() should be sufficient. Or better yet
> replace the init and exit functions with module_pci_driver()

That's a legacy of the old SPI code not handling bus and device
registration in random orders. So it's no longer needed, I believe.

Alan


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-06 12:38 ` Grant Likely
  2012-12-06 14:19   ` Alan Cox
@ 2012-12-11  2:00   ` chao bi
  2012-12-11 16:36     ` Grant Likely
  2012-12-11  8:58   ` chao bi
  2 siblings, 1 reply; 26+ messages in thread
From: chao bi @ 2012-12-11  2:00 UTC (permalink / raw)
  To: Grant Likely
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

Hi Grant,
Thanks for your comments; please see my answers below.

On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:

> >  include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++
> 
> Most (if not all) of this header file looks like it needs to be moved
> into the .c file. Any symbol that is only used by the driver's .c file
> (usually, anything that isn't platform_data) belongs in the .c

Yes, we'll update it.

> > +#define DRIVER_NAME "intel_mid_ssp_spi_unified"
> 
> This string is used exactly once. Drop the #define.

Yes, we'll update it.

> > +static const struct pci_device_id pci_ids[];
> 
> If you move the pci_ids table up to this point then the forward
> declaration can be eliminated.
> 
> Also, use a driver-specific prefix on all symbols, even if they are
> static. It makes it a lot easier to navigate code when the symbol names
> match the driver and it avoids any possibility of conflict with the
> global namespace. Something like "midssp_" or "midssp_spi_".
> 
> So, this symbol would be midssp_spi_pci_ids[], and all the static
> functions below should be renamed.

Yes, we'll update it.
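
For example, something like this (illustrative only; the actual vendor/device
IDs and driver_data stay as in the current table):

	static const struct pci_device_id midssp_spi_pci_ids[] = {
		/* hypothetical entry, real IDs kept from today's table */
		{ PCI_VDEVICE(INTEL, 0x0000), QUIRKS_PLATFORM_MRST },
		{ },
	};
	MODULE_DEVICE_TABLE(pci, midssp_spi_pci_ids);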

> > +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> > +{
> > +	int tlen1 = (len < sz ? len : sz);
> > +	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> > +	unsigned char *p;
> > +	static char msg[MAX_SPI_TRANSFER_SIZE];
> 
> Is this size a limitation of the hardware, or of the driver?

I think this size comes from the DMA controller's maximum block size.
On the Medfield platform, the DMA controller used by the SSP SPI defines a
maximum block size and word width, so the SPI transfer size should not exceed
the maximum size the DMA can transfer in one block.

> > +
> > +	memset(msg, '\0', sizeof(msg));
> > +	p = buf;
> > +	while (p < buf + tlen1)
> > +		sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> > +
> > +	if (tlen2 > 0) {
> > +		sprintf(msg, "%s .....", msg);
> > +		p = (buf+len) - tlen2;
> > +		while (p < buf + len)
> > +			sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> > +	}
> 
> This looks like it will overrun the msg buffer. It adds 2 bytes for
> every byte of data. It's also kind of sketchy code to sprintf into the
> same buffer you're reading from.
> 
> You could avoid all these problems by using print_hex_dump() instead.

Yes, we'll update it.
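
Something along these lines, perhaps (just a sketch, not yet validated):

	/* dump up to 'sz' bytes from the head and the tail of the buffer */
	print_hex_dump(KERN_INFO, "spi dump head: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, min(len, sz), false);
	if (len > sz)
		print_hex_dump(KERN_INFO, "spi dump tail: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf + len - min(len - sz, sz),
			       min(len - sz, sz), false);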

> > +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
> 
> u32 ==> bool

Yes, we'll update it.

> > +	u32 sssr;
> > +	sssr = read_SSSR(drv_context->ioaddr);
> > +	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> > +		return 0;
> > +	else
> > +		return 1;
> 
> or simply: return (sssr & (SSR_TFL_MASK || SSSR_TNF)) != 0;

Yes, that's better.
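
For the record, the reworked helper could look roughly like this (untested;
it keeps the original TFL/TNF semantics):

	static inline bool is_tx_fifo_empty(struct ssp_driver_context *drv_context)
	{
		u32 sssr = read_SSSR(drv_context->ioaddr);

		/* empty means: TX FIFO level is 0 and "transmit not full" is set */
		return !(sssr & SSSR_TFL_MASK) && (sssr & SSSR_TNF);
	}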

> ... Okay, so I just went looking for the read_SSSR() function because I
> wanted to know how it was defined. I just discovered that this driver is
> the same as drivers/spi/spi-pxa2xx.c with a PCI front end bolted on.
> 
> I'm not keen on having two separate drivers for the same logic block.

For the SSP SPI driver, read_SSSR() is defined in spi-intel-mid-ssp.h:

#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); }
...
DEFINE_SSP_REG(SSSR, 0x08)

> > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> 
> Workqueue management is integrated into the core spi infrastructure now.
> SPI drivers should no longer be creating their own workqueues.
> 
> Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> unprepare_transfer_hardware() and transfer_one_message(). See
> Documentation/spi/spi-summary for details.

OK, we'll change it to follow the core's message-queue handling, something like the rough sketch below.
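
A very rough, untested sketch of the direction (the function name is only
illustrative, details to be filled in during the rework):

static int intel_mid_ssp_spi_transfer_one_message(struct spi_master *master,
                                                  struct spi_message *msg)
{
        struct spi_transfer *t;

        list_for_each_entry(t, &msg->transfers, transfer_list) {
                /* program the SSP from spi_master_get_devdata(master),
                 * run the transfer (DMA or polling) and wait for it */
                msg->actual_length += t->len;
        }

        msg->status = 0;
        spi_finalize_current_message(master);
        return 0;
}

        /* and in probe(), instead of master->transfer = transfer: */
        master->transfer_one_message = intel_mid_ssp_spi_transfer_one_message;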

> > +late_initcall(intel_mid_ssp_spi_init);
> 
> Why late_initcall()? module_init() should be sufficient. Or better yet
> replace the init and exit functions with module_pci_driver()

Yes, module_pci_driver() is good.
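
i.e. dropping the init/exit functions entirely and ending the file with
something like this (sketch only, keeping whatever fields our pci_driver
struct already has):

static struct pci_driver intel_mid_ssp_spi_driver = {
        .name           = "intel_mid_ssp_spi",
        .id_table       = pci_ids,
        .probe          = intel_mid_ssp_spi_probe,
        .remove         = __devexit_p(intel_mid_ssp_spi_remove),
};

module_pci_driver(intel_mid_ssp_spi_driver);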

> As mentioned above, most if not all of the stuff in this file belongs in the .c.

Thanks for the detailed comments. We're updating the driver per the comments above;
once validation is done, we will resubmit the patch for your review.




* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-06 12:38 ` Grant Likely
  2012-12-06 14:19   ` Alan Cox
  2012-12-11  2:00   ` chao bi
@ 2012-12-11  8:58   ` chao bi
  2012-12-11 16:46     ` Grant Likely
  2012-12-17  8:58     ` Linus Walleij
  2 siblings, 2 replies; 26+ messages in thread
From: chao bi @ 2012-12-11  8:58 UTC (permalink / raw)
  To: Grant Likely
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:
> On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> > +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> > +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> > +	master->num_chipselect = 1;
> > +	master->cleanup = cleanup;
> > +	master->setup = setup;
> > +	master->transfer = transfer;
> > +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> 
> Workqueue management is integrated into the core spi infrastructure now.
> SPI drivers should no longer be creating their own workqueues.
> 
> Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> unprepare_transfer_hardware() and transfer_one_message(). See
> Documentation/spi/spi-summary for details.

Hi Grant,
I'd like to share my understanding here, please correct me if I'm wrong:

1. I understand the workqueue in the spi core is for driving message
transfers, so an SPI driver should not create a new workqueue for that usage.
However, the workqueue created here is not for that usage; it's used to call
back to the SPI protocol driver (ifx6x60.c) when the DMA data transfer is
finished, so it doesn't seem to conflict with the spi core. Am I right?

2. Currently our Medfield platform SW is based on linux-3.0, where transfer_one_message()
is not implemented, so the SPI driver still uses the ->transfer() hook, which
has been through long-term validation. If we change to ->transfer_one_message() now,
it's hard to do thorough validation on our platform, so could we complete
this part in 2 steps: first we implement the ->transfer() hook, which can be
validated on our hardware platform; then, once our internal SW is upgraded to
the latest Linux version, we raise a patch to adapt to the new spi core.
What's your opinion?

chao



* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]     ` <20121206141938.0100f06f-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
@ 2012-12-11 14:30       ` Jun Chen
  0 siblings, 0 replies; 26+ messages in thread
From: Jun Chen @ 2012-12-11 14:30 UTC (permalink / raw)
  To: Alan Cox, Grant Likely
  Cc: spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w, chao bi,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

On Thu, 2012-12-06 at 14:19 +0000, Alan Cox wrote:

> > > +late_initcall(intel_mid_ssp_spi_init);
> > 
> > Why late_initcall()? module_init() should be sufficient. Or better yet
> > replace the init and exit functions with module_pci_driver()
> 
> That's a legacy of the old SPI code not handling bus and device
> registration in random orders. So it's no longer needed I believe.
> 
> Alan

We use late_initcall() because we want to init the SPI driver after the
DMA driver, which is initialized with fs_initcall(), has finished. Right now
we cannot test replacing late_initcall() with module_pci_driver() because our
kernel is not based on the 3.7 branch. So maybe we keep the old late_initcall()
and update this code later on. Do you agree?



* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-11  2:00   ` chao bi
@ 2012-12-11 16:36     ` Grant Likely
  0 siblings, 0 replies; 26+ messages in thread
From: Grant Likely @ 2012-12-11 16:36 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

On Tue, 11 Dec 2012 10:00:16 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > > +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> > > +{
> > > +	int tlen1 = (len < sz ? len : sz);
> > > +	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> > > +	unsigned char *p;
> > > +	static char msg[MAX_SPI_TRANSFER_SIZE];
> > 
> > Is this size a limitation of the hardware, or of the driver?
> 
> I think this size is attributed to the DMA controller's maximum block size. 
> On Medfield platform, the DMA controller used by SSP SPI has defined its maximum 
> block size and word width, so SPI transfer size should not exceed the maximum size that 
> DMA could transfer in one block.

Typically what a driver should do here is to split up the transfer into
multiple DMA operations. I won't nack the driver over this issue, but
the driver should not have a maximum transfer size limitation in this
way.

g.



* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-11  8:58   ` chao bi
@ 2012-12-11 16:46     ` Grant Likely
  2012-12-13  9:09       ` chao bi
  2012-12-17  8:58     ` Linus Walleij
  1 sibling, 1 reply; 26+ messages in thread
From: Grant Likely @ 2012-12-11 16:46 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w

On Tue, 11 Dec 2012 16:58:31 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:
> > On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> 
> > > +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> > > +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> > > +	master->num_chipselect = 1;
> > > +	master->cleanup = cleanup;
> > > +	master->setup = setup;
> > > +	master->transfer = transfer;
> > > +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> > > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> > 
> > Workqueue management is integrated into the core spi infrastructure now.
> > SPI drivers should no longer be creating their own workqueues.
> > 
> > Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> > unprepare_transfer_hardware() and transfer_one_message(). See
> > Documentation/spi/spi-summary for details.
> 
> Hi Grant,
> I'd like to talk about my understanding here, please correct me if I was wrong:
> 
> 1. I understand the workqueue in spi core is for driving message
> transfer, so SPI driver should not create new workqueue for this usage.
> However, the workqueue created here is not for this usage it's to call
> back to SPI protocol driver (ifx6x60.c) when DMA data transfer is
> finished, so it seems not conflict with spi core. Am I right?

It appears to me like all the stuff in int_transfer_complete() can be
performed at interrupt context, or gets removed in moving to the new
system. Am I mistaken here?

> 2. Currently our Medfield Platform SW is based on linux-3.0, transfer_one_message() 
> is not implemented, so in SPI driver, we're still use ->transfer(), this 
> is with long-term validation. If we change to ->transfer_one_message() now, 
> it's hardly to do thorough validation on our platform, so shall we complete 
> this part by 2 steps, firstly we implement with ->transfer() hoot which can be 
> validation on our hardware platform, next step, when our internal SW version 
> is upgraded to latest Linux version, then we raise a patch to adapt new spi core.
> what's your opinion?

Has it been tested on current mainline? I won't nak the driver if it
doesn't use the common workqueue, but it does make it a lot

g.



* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-11 16:46     ` Grant Likely
@ 2012-12-13  9:09       ` chao bi
  2012-12-16 21:32         ` Grant Likely
  0 siblings, 1 reply; 26+ messages in thread
From: chao bi @ 2012-12-13  9:09 UTC (permalink / raw)
  To: Grant Likely
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA, ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w


On Tue, 2012-12-11 at 16:36 +0000, Grant Likely wrote:
> > On Tue, 11 Dec 2012 10:00:16 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> wrote:
> > > > > +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> > > > > +{
> > > > > +	int tlen1 = (len < sz ? len : sz);
> > > > > +	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> > > > > +	unsigned char *p;
> > > > > +	static char msg[MAX_SPI_TRANSFER_SIZE];
> > > > 
> > > > Is this size a limitation of the hardware, or of the driver?
> > > 
> > > I think this size is attributed to the DMA controller's maximum block size. 
> > > On Medfield platform, the DMA controller used by SSP SPI has defined its maximum 
> > > block size and word width, so SPI transfer size should not exceed the maximum size that 
> > > DMA could transfer in one block.
> > 
> > Typically what a driver should do here is to split up the transfer into
> > multiple DMA operations. I won't nack the driver over this issue, but
> > the driver should not have a maximum transfer size limitation in this
> > way.

Yes, agree with you. But I'm not 100% sure that's the only reason for such a limitation here.
We'll keep tracking this issue; if it turns out to be as we expect, we shall implement an
enhanced mechanism for multiple DMA transfers as the next step, roughly along these lines:
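
/* untested sketch, function and variable names made up for illustration */
static int ssp_dma_transfer_chunked(struct ssp_driver_context *drv_context,
                                    size_t len)
{
        size_t max_block = MAX_SPI_TRANSFER_SIZE;      /* DMA max block size */
        size_t done = 0;

        while (done < len) {
                size_t chunk = min(len - done, max_block);

                /* queue one DMA block of 'chunk' bytes starting at offset
                 * 'done' and wait for (or chain) its completion */
                done += chunk;
        }
        return 0;
}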



On Tue, 2012-12-11 at 16:46 +0000, Grant Likely wrote:
> On Tue, 11 Dec 2012 16:58:31 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:
> > > On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > 
> > > > +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> > > > +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> > > > +	master->num_chipselect = 1;
> > > > +	master->cleanup = cleanup;
> > > > +	master->setup = setup;
> > > > +	master->transfer = transfer;
> > > > +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> > > > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> > > 
> > > Workqueue management is integrated into the core spi infrastructure now.
> > > SPI drivers should no longer be creating their own workqueues.
> > > 
> > > Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> > > unprepare_transfer_hardware() and transfer_one_message(). See
> > > Documentation/spi/spi-summary for details.
> > 
> > Hi Grant,
> > I'd like to talk about my understanding here, please correct me if I was wrong:
> > 
> > 1. I understand the workqueue in spi core is for driving message
> > transfer, so SPI driver should not create new workqueue for this usage.
> > However, the workqueue created here is not for this usage it's to call
> > back to SPI protocol driver (ifx6x60.c) when DMA data transfer is
> > finished, so it seems not conflict with spi core. Am I right?
> 
> It appears to me like all the stuff in int_transfer_complete() can be
> performed at interrupt context, or gets removed in moving to the new
> system. Am I mistaken here?
> 

Yes, we can make use of the new SPI core interface to call back to the protocol driver
(through spi_finalize_current_message()), but it looks better to call
spi_finalize_current_message() from the workqueue rather than from the DMA interrupt context,
because the protocol driver's callback can take a long time, and it's
better to move that out of interrupt context. Therefore, I'd prefer to keep
the workqueue here if you agree; what's your opinion?

> > 2. Currently our Medfield Platform SW is based on linux-3.0, transfer_one_message() 
> > is not implemented, so in SPI driver, we're still use ->transfer(), this 
> > is with long-term validation. If we change to ->transfer_one_message() now, 
> > it's hardly to do thorough validation on our platform, so shall we complete 
> > this part by 2 steps, firstly we implement with ->transfer() hoot which can be 
> > validation on our hardware platform, next step, when our internal SW version 
> > is upgraded to latest Linux version, then we raise a patch to adapt new spi core.
> > what's your opinion?
> 
> Has it been tested on current mainline? I won't nak the driver if it
> doesn't use the common workqueue, but it does make it a lot

Agree with you, the linux 3.7 SPI core interface is much better; we'll keep the SSP driver
aligned with it and test against the mainline SPI core. We're now porting the mainline SPI core
to our platform for testing, which may take some time; after that we'll re-submit for your review.

Thanks,
chao



* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-13  9:09       ` chao bi
@ 2012-12-16 21:32         ` Grant Likely
  2012-12-17  8:24           ` chao bi
  0 siblings, 1 reply; 26+ messages in thread
From: Grant Likely @ 2012-12-16 21:32 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, Linus Walleij,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Thu, 13 Dec 2012 17:09:34 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> On Tue, 2012-12-11 at 16:46 +0000, Grant Likely wrote:
> > On Tue, 11 Dec 2012 16:58:31 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > > On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:
> > > > On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > > 
> > > > > +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> > > > > +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> > > > > +	master->num_chipselect = 1;
> > > > > +	master->cleanup = cleanup;
> > > > > +	master->setup = setup;
> > > > > +	master->transfer = transfer;
> > > > > +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> > > > > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> > > > 
> > > > Workqueue management is integrated into the core spi infrastructure now.
> > > > SPI drivers should no longer be creating their own workqueues.
> > > > 
> > > > Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> > > > unprepare_transfer_hardware() and transfer_one_message(). See
> > > > Documentation/spi/spi-summary for details.
> > > 
> > > Hi Grant,
> > > I'd like to talk about my understanding here, please correct me if I was wrong:
> > > 
> > > 1. I understand the workqueue in spi core is for driving message
> > > transfer, so SPI driver should not create new workqueue for this usage.
> > > However, the workqueue created here is not for this usage it's to call
> > > back to SPI protocol driver (ifx6x60.c) when DMA data transfer is
> > > finished, so it seems not conflict with spi core. Am I right?
> > 
> > It appears to me like all the stuff in int_transfer_complete() can be
> > performed at interrupt context, or gets removed in moving to the new
> > system. Am I mistaken here?
> > 
> 
> Yes, we can make use of new SPI core interface to callback to protocol driver
> (through spi_finalize_current_message()), but looks like it's better to call
> spi_finalize_current_message() inside workqueue than DMA interrupt context, 
> because the callback function for protocol driver would cost much time, it's 
> better to move this part out of interrupt context. Therefore, I prefer to keep 
> the workqueue here if you agree, what's your opinion? 

It would be better to work within the context of the kthread that is
already managing transfers. Otherwise you've got multiple contexts that
could be competing. Plus the kthread may be running in realtime context,
but that would be useless since the workqueue would never have the same
priority.

It currently isn't documented whether or not protocol drivers can sleep
in the complete callback. I think it is assumed that it cannot, but that
should be verified. If it is a problem for the complete callback to
require atomicity, then maybe we should have a separate .complete_atomic
hook for those that can handle it, and call .complete() in the kthread
context.

Linusw, you did a bunch of work on this. What do you think?

g.



* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-16 21:32         ` Grant Likely
@ 2012-12-17  8:24           ` chao bi
  0 siblings, 0 replies; 26+ messages in thread
From: chao bi @ 2012-12-17  8:24 UTC (permalink / raw)
  To: Grant Likely
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, Walleij,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Linus,
	alan-VuQAYsv1563Yd54FQh9/CA

On Sun, 2012-12-16 at 21:32 +0000, Grant Likely wrote:
> On Thu, 13 Dec 2012 17:09:34 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > On Tue, 2012-12-11 at 16:46 +0000, Grant Likely wrote:
> > > On Tue, 11 Dec 2012 16:58:31 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > > > On Thu, 2012-12-06 at 12:38 +0000, Grant Likely wrote:
> > > > > On Wed, 21 Nov 2012 10:16:43 +0800, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> > > > 
> > > > > > +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> > > > > > +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> > > > > > +	master->num_chipselect = 1;
> > > > > > +	master->cleanup = cleanup;
> > > > > > +	master->setup = setup;
> > > > > > +	master->transfer = transfer;
> > > > > > +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> > > > > > +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> > > > > 
> > > > > Workqueue management is integrated into the core spi infrastructure now.
> > > > > SPI drivers should no longer be creating their own workqueues.
> > > > > 
> > > > > Instead, replace the ->transfer hook with prepare_transfer_hardware(),
> > > > > unprepare_transfer_hardware() and transfer_one_message(). See
> > > > > Documentation/spi/spi-summary for details.
> > > > 
> > > > Hi Grant,
> > > > I'd like to talk about my understanding here, please correct me if I was wrong:
> > > > 
> > > > 1. I understand the workqueue in spi core is for driving message
> > > > transfer, so SPI driver should not create new workqueue for this usage.
> > > > However, the workqueue created here is not for this usage it's to call
> > > > back to SPI protocol driver (ifx6x60.c) when DMA data transfer is
> > > > finished, so it seems not conflict with spi core. Am I right?
> > > 
> > > It appears to me like all the stuff in int_transfer_complete() can be
> > > performed at interrupt context, or gets removed in moving to the new
> > > system. Am I mistaken here?
> > > 
> > 
> > Yes, we can make use of new SPI core interface to callback to protocol driver
> > (through spi_finalize_current_message()), but looks like it's better to call
> > spi_finalize_current_message() inside workqueue than DMA interrupt context, 
> > because the callback function for protocol driver would cost much time, it's 
> > better to move this part out of interrupt context. Therefore, I prefer to keep 
> > the workqueue here if you agree, what's your opinion? 
> 
> It would be better to work within the context of the kthread that is
> already managing transfers. Otherwise you've got multiple contexts that
> could be competing. Plus the kthread may be running in realtime context,
> but that would be useless since the workqueue would never have the same
> priority.

Yes, it's done as you commented; please check the patch we re-submitted today
in another mail.

> It currently isn't documented whether or not protocol drivers can sleep
> in the complete callback. I think it is assumed that it cannot, but that
> should be verified. If it is a problem for the complete callback to
> require atomicity, then maybe we should have a separate .complete_atomic
> hook for those that can handle it, and call .complete() in the kthread
> context.
> 
> Linusw, you did a bunch of work on this. What do you think?
> 
> g.
> 




* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-11  8:58   ` chao bi
  2012-12-11 16:46     ` Grant Likely
@ 2012-12-17  8:58     ` Linus Walleij
  1 sibling, 0 replies; 26+ messages in thread
From: Linus Walleij @ 2012-12-17  8:58 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Tue, Dec 11, 2012 at 9:58 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> 1. I understand the workqueue in spi core is for driving message
> transfer, so SPI driver should not create new workqueue for this usage.
> However, the workqueue created here is not for this usage it's to call
> back to SPI protocol driver (ifx6x60.c) when DMA data transfer is
> finished, so it seems not conflict with spi core. Am I right?

So a single message can contain several transfers, and if this
is some per-transfer DMA thing, it could be valid. I need to go
in and look closer at the patch.

I've considered trying to also generalize parts of the transfer
handling but ran out of energy.

Yours,
Linus Walleij


* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-11-21  2:16 [PATCH] SPI: SSP SPI Controller driver chao bi
                   ` (2 preceding siblings ...)
  2012-12-06 12:38 ` Grant Likely
@ 2012-12-17 11:23 ` Linus Walleij
       [not found]   ` <CACRpkdad3fHxWRpRqD-eP8-sKKexN+s-JZCT6XLggv92Q=5kMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  3 siblings, 1 reply; 26+ messages in thread
From: Linus Walleij @ 2012-12-17 11:23 UTC (permalink / raw)
  To: chao bi, Dan Williams, Vinod Koul
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Wed, Nov 21, 2012 at 3:16 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> This patch is to implement SSP SPI controller driver, which has been applied and
> validated on intel Moorestown & Medfield platform. The patch are originated by
> Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
> and to be further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Chen Jun
> <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> according to their integration & validation on Medfield platform.
>
> Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>

OK...

> +#ifdef DUMP_RX

So since this #define DUMP_RX is not part of this patch and not of the
kernel at large, it's basically an #if 0, and all the code within such
defines should be deleted.

But I guess you have this undocumented feature that the developer
is supposed to hack the file and insert #define DUMP_RX to use it.

Idea: if you want to keep this use the kernel verbose debug system.
In drivers/spi/Kconfig we have:

config SPI_DEBUG
        boolean "Debug support for SPI drivers"
        depends on DEBUG_KERNEL
        help
          Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
          sysfs, and debugfs support in SPI controller and protocol drivers.

So insert something like:

config SPI_VERBOSE_DEBUG
        boolean "Verbose debug support for SPI drivers"
        depends on SPI_DEBUG
       ....

Modify Makefile to contain:

ccflags-$(CONFIG_SPI_VERBOSE_DEBUG) := -DVERBOSE_DEBUG

Then put the above within #ifdef CONFIG_SPI_VERBOSE_DEBUG

Then you can use dev_vdbg() and friends from <linux/device.h>.

Because I think that's what it is essentially: verbose debugging.

> +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
> +{
> +       int tlen1 = (len < sz ? len : sz);
> +       int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> +       unsigned char *p;
> +       static char msg[MAX_SPI_TRANSFER_SIZE];
> +
> +       memset(msg, '\0', sizeof(msg));
> +       p = buf;
> +       while (p < buf + tlen1)
> +               sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +
> +       if (tlen2 > 0) {
> +               sprintf(msg, "%s .....", msg);
> +               p = (buf+len) - tlen2;
> +               while (p < buf + len)
> +                       sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +       }
> +
> +       dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
> +                  len-tlen2, len - 1, msg);

dev_vdbg().

(...)
> +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)

Change return type to bool if you're just returning 0 or 1.

> +{
> +       u32 sssr;
> +       sssr = read_SSSR(drv_context->ioaddr);
> +       if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> +               return 0;
> +       else
> +               return 1;
> +}

return false/true.

> +static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
> +{
> +       return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
> +}

Ditto. Here it is even more obvious.

(...)
> +static void flush(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       u32 i = 0;
> +
> +       /* If the transmit fifo is not empty, reset the interface. */
> +       if (!is_tx_fifo_empty(drv_context)) {
> +               dev_err(&drv_context->pdev->dev,
> +                               "TX FIFO not empty. Reset of SPI IF");
> +               disable_interface(drv_context);
> +               return;
> +       }
> +
> +       dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
> +       while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
> +               read_SSDR(reg);
> +               i++;
> +       }
> +       WARN(i > 0, "%d words flush occured\n", i);

WARN really? Why not dev_warn()?

> +static int null_writer(struct ssp_driver_context *drv_context)
> +static int null_reader(struct ssp_driver_context *drv_context)
> +static int u8_writer(struct ssp_driver_context *drv_context)
> +static int u8_reader(struct ssp_driver_context *drv_context)
> +static int u16_writer(struct ssp_driver_context *drv_context)
> +static int u16_reader(struct ssp_driver_context *drv_context)
> +static int u32_writer(struct ssp_driver_context *drv_context)
> +static int u32_reader(struct ssp_driver_context *drv_context)

These seem to all be designed to return 0 or 1 and should then be
bool. It seems strange actually, you would expect that such a
function returns the number of bytes or words read/written.

> +static bool chan_filter(struct dma_chan *chan, void *param)
> +static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
> +static void intel_mid_ssp_spi_dma_done(void *arg)
> +static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
> +static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
> +static void dma_transfer(struct ssp_driver_context *drv_context)
> +static int map_dma_buffers(struct ssp_driver_context *drv_context)

DMA code looks correct but it'd be nice to get Dan Williams
or Vinod Koul to check it. It's sooo easy to make tiny mistakes
(though it seems this code has seen some testing indeed).

(...)
> +/**
> + * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
> + * @drv_context:       Pointer to the private driver context
> + */
> +static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
> +{
> +       u32 length = drv_context->len;
> +
> +       if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> +               && (drv_context->len > drv_context->rx_fifo_threshold *
> +               drv_context->n_bytes))
> +               length = TRUNCATE(drv_context->len,
> +                       drv_context->rx_fifo_threshold * drv_context->n_bytes);

TRUNCATE is a too generic name but I'll leave that comment for
the header file where it's defined.

It looks very strange.

Isn't this simply an arithmetic soup construction to say:

length = drv_context->len / (drv_context->rx_fifo_threshold *
drv_context->n_bytes);

Integer division redefined in unintelligible terms.

Please look over this. And that goes for the other instance of TRUNCATE()
as well.

> +
> +       memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
> +}

memcpy_fromio() on some SRAM...

If the SRAM is just a RAM why do you need the _fromio() copying?
(Just curious.)

> +static void int_transfer_complete(struct ssp_driver_context *drv_context)
> +{
> +       void *reg = drv_context->ioaddr;
> +       struct spi_message *msg;
> +       struct device *dev = &drv_context->pdev->dev;
> +
> +       if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
> +               pm_qos_update_request(&drv_context->pm_qos_req,
> +                                       PM_QOS_DEFAULT_VALUE);

It's weird that using PM QoS is treated as an unlikely oddity.
Should it not be the other way around?

> +
> +       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
> +               sram_to_ddr_cpy(drv_context);
> +
> +       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
> +               drain_trail(drv_context);

Usually likely() / unlikely() micro-optimization is discouraged,
do you have specific performance numbers for using it so
much here?

> +       else
> +               /* Stop getting Time Outs */
> +               write_SSTO(0, reg);
> +
> +       drv_context->cur_msg->status = 0;
> +       drv_context->cur_msg->actual_length = drv_context->len;
> +
> +#ifdef DUMP_RX
> +       dump_trailer(dev, drv_context->rx, drv_context->len, 16);
> +#endif

At least avoid doing these inlined #ifdefs please.

Define a stub up-there where it's defined instead:

#if DUMP_RX
void dump_trailer()
{
...
}
#else
static inline void dump_trailer() {}
#endif

The kernel already relies on the compiler to remove such
code completely, so should be done here as well.

> +       dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
> +       msg = drv_context->cur_msg;
> +       if (likely(msg->complete))
> +               msg->complete(msg->context);
> +}

So this is duplicating the code in the bulk SPI code.

Please try to use the generic transfer queue, it's really nice.

> +static void int_transfer_complete_work(struct work_struct *work)
> +{
> +       struct ssp_driver_context *drv_context = container_of(work,
> +                               struct ssp_driver_context, complete_work);
> +
> +       int_transfer_complete(drv_context);
> +}

What does "int_" mean in the above function signature?

The file repeatedly says interrupt mode is not supported, so it can't
be "interrupt". "internal"?

Actually it seems this is dead code. The only reference in the
driver appears to be when an unused WQ is pointed at this
function.

I think you have some clean-up to do if you don't support interrupt
mode.

> +static void poll_transfer_complete(struct ssp_driver_context *drv_context)
> +{
> +       struct spi_message *msg;
> +
> +       /* Update total byte transfered return count actual bytes read */
> +       drv_context->cur_msg->actual_length +=
> +               drv_context->len - (drv_context->rx_end - drv_context->rx);
> +
> +       drv_context->cur_msg->status = 0;
> +
> +       msg = drv_context->cur_msg;
> +       if (likely(msg->complete))
> +               msg->complete(msg->context);
> +}

This is also reimplementing the message transfer queue in the core.

Use the generic message queue.

> +/**
> + * ssp_int() - Interrupt handler
> + * @irq
> + * @dev_id
> + *
> + * The SSP interrupt is not used for transfer which are handled by
> + * DMA or polling: only under/over run are catched to detect
> + * broken transfers.
> + */
> +static irqreturn_t ssp_int(int irq, void *dev_id)
> +{
> +       struct ssp_driver_context *drv_context = dev_id;
> +       void *reg = drv_context->ioaddr;
> +       struct device *dev = &drv_context->pdev->dev;
> +       u32 status = read_SSSR(reg);
> +
> +       /* It should never be our interrupt since SSP will */
> +       /* only trigs interrupt for under/over run.        */

/*
 * Squash to some nice multiline comment will you?
 */

> +       if (likely(!(status & drv_context->mask_sr)))
> +               return IRQ_NONE;
> +
> +       if (status & SSSR_ROR || status & SSSR_TUR) {
> +               dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
> +               WARN_ON(1);
> +               if (status & SSSR_ROR)
> +                       dev_err(dev, "we have Overrun\n");
> +               if (status & SSSR_TUR)
> +                       dev_err(dev, "we have Underrun\n");
> +       }
> +
> +       /* We can fall here when not using DMA mode */

fall? fail?

> +       if (!drv_context->cur_msg) {
> +               disable_interface(drv_context);
> +               disable_triggers(drv_context);
> +       }

So you only do something if you *don't* have any messages?

> +       /* clear status register */
> +       write_SSSR(drv_context->clear_sr, reg);
> +       return IRQ_HANDLED;
> +}

I can't see how this error interrupt handler actually handles
errors. Has this been tested? It seems you only print an error
message and carry on as usual.

You should tear down the ongoing transfer, set the msg->status
to some error code and/or retransmit, should you not?

At least put in a TODO so that maintainers of this driver
know that there's something unhandled here.
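
Something in this direction maybe (just a sketch, reusing the fields the
driver context already has; the actual teardown is still TODO):

static irqreturn_t ssp_int(int irq, void *dev_id)
{
        struct ssp_driver_context *drv_context = dev_id;
        struct device *dev = &drv_context->pdev->dev;
        void *reg = drv_context->ioaddr;
        u32 status = read_SSSR(reg);

        if (likely(!(status & drv_context->mask_sr)))
                return IRQ_NONE;

        if (status & (SSSR_ROR | SSSR_TUR)) {
                dev_err(dev, "SPI %s: SSSR=%x\n",
                        status & SSSR_ROR ? "overrun" : "underrun", status);
                if (drv_context->cur_msg)
                        drv_context->cur_msg->status = -EIO;
                /* TODO: actually tear down the ongoing DMA/poll transfer */
        }

        write_SSSR(drv_context->clear_sr, reg);
        return IRQ_HANDLED;
}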

(...)
> +/**
> + * start_bitbanging() - Clock synchronization by bit banging
> + * @drv_context:       Pointer to private driver context
> + *
> + * This clock synchronization will be removed as soon as it is
> + * handled by the SCU.
> + */
> +static void start_bitbanging(struct ssp_driver_context *drv_context)
> +{
> +       u32 sssr;
> +       u32 count = 0;
> +       u32 cr0;
> +       void *i2c_reg = drv_context->I2C_ioaddr;
> +       struct device *dev = &drv_context->pdev->dev;
> +       void *reg = drv_context->ioaddr;
> +       struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
> +       cr0 = chip->cr0;
> +
> +       dev_warn(dev, "In %s : Starting bit banging\n",\
> +               __func__);
> +       if (read_SSSR(reg) & SSP_NOT_SYNC)
> +               dev_warn(dev, "SSP clock desynchronized.\n");
> +       if (!(read_SSCR0(reg) & SSCR0_SSE))
> +               dev_warn(dev, "in SSCR0, SSP disabled.\n");
> +
> +       dev_dbg(dev, "SSP not ready, start CLK sync\n");
> +
> +       write_SSCR0(cr0 & ~SSCR0_SSE, reg);
> +       write_SSPSP(0x02010007, reg);

Aha 0x02010007.

Usually we define the bitfields, and actually you have:

<snip from header file>
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
<end>

Please use these bit specifiers to conjure the magic number instead.
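
For instance, if I decode it right, the 0x02010007 above would read as:

        write_SSPSP(SSPSP_FSRT | SSPSP_SFRMWDTH(1) |
                    SSPSP_SFRMP | SSPSP_SCMODE(3), reg);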

(...)
> +       write_I2CDATA(0x3, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070034, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CDATA(0x00000099, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070038, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);

Ditto.

> +       /* Bit bang the clock until CSS clears */
> +       while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
> +               write_I2CDATA(0x2, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CCTRL(0x01070034, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CDATA(0x3, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);
> +               write_I2CCTRL(0x01070034, i2c_reg);
> +               udelay(I2C_ACCESS_USDELAY);


Ditto.

> +       if (count >= MAX_BITBANGING_LOOP)
> +               dev_err(dev,
> +                       "ERROR in %s : infinite loop on bit banging. Aborting\n",
> +                       __func__);
> +
> +       dev_dbg(dev, "---Bit bang count=%d\n", count);
> +
> +       write_I2CDATA(0x0, i2c_reg);
> +       udelay(I2C_ACCESS_USDELAY);
> +       write_I2CCTRL(0x01070038, i2c_reg);

Ditto.

> +static unsigned int ssp_get_clk_div(int speed)
> +{
> +       return max(100000000 / speed, 4) - 1;
> +}

This was nice to see, good use of the max() operator!

(...)
> +static int transfer(struct spi_device *spi, struct spi_message *msg)

This is duplicating the core message transfer queue.

Refactor this code to use the new infrastructure.

(...)
> +static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
> +       const struct pci_device_id *ent)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct spi_master *master;
> +       struct ssp_driver_context *drv_context = 0;
> +       int status;
> +       u32 iolen = 0;
> +       u8 ssp_cfg;
> +       int pos;
> +       void __iomem *syscfg_ioaddr;
> +       unsigned long syscfg;
> +
> +       /* Check if the SSP we are probed for has been allocated */
> +       /* to operate as SPI. This information is retreived from */
> +       /* the field adid of the Vendor-Specific PCI capability  */
> +       /* which is used as a configuration register.            */

/*
 * Convert to multiline comment
 */

> +       pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
> +       if (pos > 0) {
> +               pci_read_config_byte(pdev,
> +                       pos + VNDR_CAPABILITY_ADID_OFFSET,
> +                       &ssp_cfg);
> +       } else {
> +               dev_info(dev, "No Vendor Specific PCI capability\n");
> +               goto err_abort_probe;
> +       }
> +
> +       if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
> +               dev_info(dev, "Unsupported SSP mode (%02xh)\n",
> +                       ssp_cfg);
> +               goto err_abort_probe;
> +       }
> +
> +       dev_info(dev, "found PCI SSP controller(ID: %04xh:%04xh cfg: %02xh)\n",
> +               pdev->vendor, pdev->device, ssp_cfg);
> +
> +       status = pci_enable_device(pdev);
> +       if (status)
> +               return status;
> +
> +       /* Allocate Slave with space for drv_context and null dma buffer */
> +       master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
> +
> +       if (!master) {
> +               dev_err(dev, "cannot alloc spi_slave\n");
> +               status = -ENOMEM;
> +               goto err_free_0;
> +       }
> +
> +       drv_context = spi_master_get_devdata(master);
> +       drv_context->master = master;
> +
> +       drv_context->pdev = pdev;
> +       drv_context->quirks = ent->driver_data;
> +
> +       /* Set platform & configuration quirks */
> +       if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
> +               /* Apply bit banging workarround on MRST */
> +               drv_context->quirks |= QUIRKS_BIT_BANGING;
> +               /* MRST slave mode workarrounds */
> +               if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +                       drv_context->quirks |=
> +                               QUIRKS_USE_PM_QOS |
> +                               QUIRKS_SRAM_ADDITIONAL_CPY;
> +       }
> +       drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
> +       if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +               drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
> +
> +       master->mode_bits = SPI_CPOL | SPI_CPHA;
> +       master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> +       master->num_chipselect = 1;
> +       master->cleanup = cleanup;
> +       master->setup = setup;
> +       master->transfer = transfer;

Use the new message queue mechanism.

> +       drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> +       INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);

As noted this workqueue seems to be completely unused. Kill it.

> +       if (drv_context->quirks & QUIRKS_BIT_BANGING) {
> +               /* Bit banging on the clock is done through */
> +               /* DFT which is available through I2C.      */
> +               /* get base address of I2C_Serbus registers */
> +               drv_context->I2C_paddr = 0xff12b000;

What on earth is this?

Note the comment says "get base address", you're not getting it at
all, you're hardcoding it. Resources like this should be passed in from
the outside.

What will happen on the next platform when some ASIC engineer
decides to move this some pages ahead?

Don't you have some platform data/device tree/ACPI table or
whatever where this is supposed to be stored?

Looks like a criss-cross dependency to some I2C block, and
as such it deserves a big fat comment about the weirdness
going on here.

> +               drv_context->I2C_ioaddr =
> +                       ioremap_nocache(drv_context->I2C_paddr, 0x10);

Like the size of that ioregion.

And use devm_ioremap_nocache() to utilize managed resources.

> +               if (!drv_context->I2C_ioaddr) {
> +                       status = -ENOMEM;
> +                       goto err_free_3;
> +               }
> +       }
> +
> +       /* Attach to IRQ */
> +       drv_context->irq = pdev->irq;
> +       status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
> +               "intel_mid_ssp_spi", drv_context);

Use managed resources throughout:
devm_request_irq() in this case.
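
E.g. roughly (sketch only, error paths simplified):

        drv_context->I2C_ioaddr = devm_ioremap_nocache(&pdev->dev,
                                        drv_context->I2C_paddr, 0x10);
        if (!drv_context->I2C_ioaddr)
                return -ENOMEM;

        status = devm_request_irq(&pdev->dev, pdev->irq, ssp_int,
                                  IRQF_SHARED, "intel_mid_ssp_spi",
                                  drv_context);
        if (status)
                return status;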

> +       if (status < 0) {
> +               dev_err(&pdev->dev, "can not get IRQ\n");
> +               goto err_free_4;
> +       }
> +
> +       if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
> +               /* get base address of DMA selector. */
> +               syscfg = drv_context->paddr - SYSCFG;
> +               syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);

devm_ioremap_nocache()

> +               if (!syscfg_ioaddr) {
> +                       status = -ENOMEM;
> +                       goto err_free_5;
> +               }
> +               iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
> +       }
> +
> +       tasklet_init(&drv_context->poll_transfer, poll_transfer,
> +               (unsigned long)drv_context);

I think this tasklet can be removed and you can have the SPI core
message queue drive the transfers. But prove me wrong.

> +       /* Register with the SPI framework */
> +       dev_info(dev, "register with SPI framework (bus spi%d)\n",
> +               master->bus_num);
> +
> +       status = spi_register_master(master);
> +
> +       if (status != 0) {
> +               dev_err(dev, "problem registering spi\n");
> +               goto err_free_5;
> +       }
> +
> +       pci_set_drvdata(pdev, drv_context);
> +
> +       /* Create the PM_QOS request */
> +       if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> +               pm_qos_add_request(&drv_context->pm_qos_req,
> +               PM_QOS_CPU_DMA_LATENCY,
> +               PM_QOS_DEFAULT_VALUE);
> +
> +       return status;
> +
> +err_free_5:
> +       free_irq(drv_context->irq, drv_context);
> +err_free_4:
> +       iounmap(drv_context->I2C_ioaddr);
> +err_free_3:
> +       iounmap(drv_context->ioaddr);

These three go away with managed devm_* resources.

> +err_free_2:
> +       pci_release_region(pdev, 0);
> +err_free_1:
> +       spi_master_put(master);
> +err_free_0:
> +       pci_disable_device(pdev);
> +
> +       return status;
> +err_abort_probe:
> +       dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
> +               pdev->vendor, pdev->device);
> +       return -ENODEV;
> +}

(...)
> +static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
> +{
> +       struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
> +
> +       if (!drv_context)
> +               return;
> +
> +       /* Release IRQ */
> +       free_irq(drv_context->irq, drv_context);
> +
> +       iounmap(drv_context->ioaddr);
> +       if (drv_context->quirks & QUIRKS_BIT_BANGING)
> +               iounmap(drv_context->I2C_ioaddr);

These also go away with devm_*

> +
> +       /* disconnect from the SPI framework */
> +       spi_unregister_master(drv_context->master);
> +
> +       pci_set_drvdata(pdev, NULL);
> +       pci_release_region(pdev, 0);
> +       pci_disable_device(pdev);
> +
> +       return;
> +}
> +
> +#ifdef CONFIG_PM
> +/**
> + * intel_mid_ssp_spi_suspend() - Driver suspend procedure
> + * @pdev:      Pointer to the pci_dev struct
> + * @state:     pm_message_t
> + */
> +static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
> +{
> +       struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
> +       dev_dbg(&pdev->dev, "suspend\n");
> +
> +       tasklet_disable(&drv_context->poll_transfer);
> +
> +       return 0;
> +}

When using the central message queue you probably just call
spi_master_suspend()
spi_master_resume()

here and the framework takes care of the message queue.

(...)
> +++ b/include/linux/spi/spi-intel-mid-ssp.h
> @@ -0,0 +1,326 @@
> +/*
> + *  Copyright (C) Intel 2009
> + *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> + *
> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Cut these nice wallpapers... OK no big deal maybe.

(...)

For the following review comments, begin with adding:

#include <linux/bitops.h>

So you get the BIT() macro and some more.

> +#define SSP_NOT_SYNC 0x400000

Then you can write:

#define SSP_NOT_SYNC BIT(22)

Which tell us what this is actually about. A flag in bit 22.

> +#define MAX_SPI_TRANSFER_SIZE 8192
> +#define MAX_BITBANGING_LOOP   10000
> +#define SPI_FIFO_SIZE 16
> +
> +/* PM QoS define */
> +#define MIN_EXIT_LATENCY 20

State unit. milliseconds I think?

> +/* SSP assignement configuration from PCI config */
> +#define SSP_CFG_GET_MODE(ssp_cfg)      ((ssp_cfg) & 0x07)
> +#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)        (((ssp_cfg) >> 3) & 0x07)
> +#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)  ((ssp_cfg) & 0x40)

& BIT(6)

> +#define SSP_CFG_SPI_MODE_ID            1
> +/* adid field offset is 6 inside the vendor specific capability */
> +#define VNDR_CAPABILITY_ADID_OFFSET    6
> +
> +/* Driver's quirk flags */
> +/* This workarround bufferizes data in the audio fabric SDRAM from  */
> +/* where the DMA transfers will operate. Should be enabled only for */
> +/* SPI slave mode.                                                  */
> +#define QUIRKS_SRAM_ADDITIONAL_CPY     1

BIT(0)

> +/* If set the trailing bytes won't be handled by the DMA.           */
> +/* Trailing byte feature not fully available.                       */
> +#define QUIRKS_DMA_USE_NO_TRAIL                2

BIT(1)

> +/* If set, the driver will use PM_QOS to reduce the latency         */
> +/* introduced by the deeper C-states which may produce over/under   */
> +/* run issues. Must be used in slave mode. In master mode, the      */
> +/* latency is not critical, but setting this workarround  may       */
> +/* improve the SPI throughput.                                      */
> +#define QUIRKS_USE_PM_QOS              4

BIT(2)

> +/* This quirks is set on Moorestown                                 */
> +#define QUIRKS_PLATFORM_MRST           8

BIT(3)

> +/* This quirks is set on Medfield                                   */
> +#define QUIRKS_PLATFORM_MDFL           16

BIT(4)

> +/* If set, the driver will apply the bitbanging workarround needed  */
> +/* to enable defective Langwell stepping A SSP. The defective SSP   */
> +/* can be enabled only once, and should never be disabled.          */
> +#define QUIRKS_BIT_BANGING             32

BIT(5)

> +/* If set, SPI is in slave clock mode                               */
> +#define QUIRKS_SPI_SLAVE_CLOCK_MODE    64

BIT(6)

> +/* Uncomment to get RX and TX short dumps after each transfer */
> +/* #define DUMP_RX 1 */

As mentioned in the main file, convert to a Kconfig verbose config option.

> +#define MAX_TRAILING_BYTE_RETRY 16
> +#define MAX_TRAILING_BYTE_LOOP 100

Max iterations?

> +#define DELAY_TO_GET_A_WORD 3
> +#define DFLT_TIMEOUT_VAL 500

milliseconds?

> +#define DEFINE_SSP_REG(reg, off) \
> +static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
> +static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }

But the other code is using io-accessors, so what about you use
ioread32()/iowrite32() instead?

In any way readl_relaxed() and writel_relaxed() would be preferable
to this I think?
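
I.e. something like:

#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void __iomem *p) { return ioread32(p + (off)); } \
static inline void write_##reg(u32 v, void __iomem *p) { iowrite32(v, p + (off)); }

(and while at it the void * could become void __iomem * in the callers too.)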

> +#define RX_DIRECTION 0
> +#define TX_DIRECTION 1
> +
> +#define I2C_ACCESS_USDELAY 10
> +
> +#define DFLT_BITS_PER_WORD 16
> +#define MIN_BITS_PER_WORD     4
> +#define MAX_BITS_PER_WORD     32
> +#define DFLT_FIFO_BURST_SIZE   IMSS_FIFO_BURST_8
> +
> +#define TRUNCATE(x, a) ((x) & ~((a)-1))

Too generic name. And what it does is actually mask the (a) upper
bits so it's misleading too.

I'm confused over this macro, as per comments in the code, and
suspect it should be removed in favor of integer division.

If you have to keep this then at least rewrite it using a
static inline.
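
Something like this, picking a driver-prefixed name (note it rounds x down
to a multiple of a, and only works when a is a power of two):

static inline u32 ssp_truncate(u32 x, u32 a)
{
        return x & ~(a - 1);
}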

> +DEFINE_SSP_REG(SSCR0, 0x00)
> +DEFINE_SSP_REG(SSCR1, 0x04)
> +DEFINE_SSP_REG(SSSR, 0x08)
> +DEFINE_SSP_REG(SSITR, 0x0c)
> +DEFINE_SSP_REG(SSDR, 0x10)
> +DEFINE_SSP_REG(SSTO, 0x28)
> +DEFINE_SSP_REG(SSPSP, 0x2c)
> +
> +DEFINE_SSP_REG(I2CCTRL, 0x00);
> +DEFINE_SSP_REG(I2CDATA, 0x04);
> +
> +DEFINE_SSP_REG(GPLR1, 0x04);
> +DEFINE_SSP_REG(GPDR1, 0x0c);
> +DEFINE_SSP_REG(GPSR1, 0x14);
> +DEFINE_SSP_REG(GPCR1, 0x1C);
> +DEFINE_SSP_REG(GAFR1_U, 0x44);
> +
> +#define SYSCFG  0x20bc0

Which means?

> +#define SRAM_BASE_ADDR 0xfffdc000

Should be passed as a resource, see the reasoning above for the
"I2C" base address. What happens on the next ASIC spin when
the engineer moves this base offset etc., don't you have any
system discovery?

> +#define SRAM_RX_ADDR   SRAM_BASE_ADDR
> +#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
> +
> +#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
> +#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */

No lowercase in macros at all please.

SSCR0_DATASIZE() is fine.

> +#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
> +#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */

Ditto.

And this is a very funny way to define the integer "0".

I understand the intent but...

> +#define SSCR0_ECS   (1 << 6) /* External clock select */

BIT(6)

> +#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */

BIT(7)

> +
> +#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
> +#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */

Uppercase.

> +#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
> +#define SSCR0_NCS   (1 << 21)           /* Network clock select */
> +#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrrun int mask */
> +#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */

BIT(20), BIT(21) ...

> +#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
> +#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */

Uppercase.

> +#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
> +#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */

BIT(30), BIT(31)

> +#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
> +#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
> +#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
> +#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
> +#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
> +#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
> +#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
> +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
> +#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
> +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */

BIT(0), BIT(1)...

> +#define SSSR_TNF               (1 << 2)        /* Tx FIFO Not Full */
> +#define SSSR_RNE               (1 << 3)        /* Rx FIFO Not Empty */
> +#define SSSR_BSY               (1 << 4)        /* SSP Busy */
> +#define SSSR_TFS               (1 << 5)        /* Tx FIFO Service Request */
> +#define SSSR_RFS               (1 << 6)        /* Rx FIFO Service Request */
> +#define SSSR_ROR               (1 << 7)        /* Rx FIFO Overrun */

You know the drill.

> +#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
> +
> +#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
> +#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
> +#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
> +#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
> +
> +#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
> +#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
> +#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
> +#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
> +#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
> +#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
> +#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
> +#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
> +#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
> +#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
> +#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
> +#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
> +#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
> +#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
> +#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
> +#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interupt Enable */
> +#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
> +#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
> +#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
> +
> +#define SSSR_BCE         (1 << 23) /* Bit Count Error */
> +#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
> +#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
> +#define SSSR_EOC         (1 << 20) /* End Of Chain */
> +#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
> +#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */

Use BIT() macro throughout.
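With <linux/bitops.h> included, the block right above becomes for example:

#define SSSR_BCE         BIT(23) /* Bit Count Error */
#define SSSR_CSS         BIT(22) /* Clock Synchronisation Status */
#define SSSR_TUR         BIT(21) /* Transmit FIFO Under Run */
#define SSSR_EOC         BIT(20) /* End Of Chain */
#define SSSR_TINT        BIT(19) /* Receiver Time-out Interrupt */
#define SSSR_PINT        BIT(18) /* Peripheral Trailing Byte Interrupt */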

> +#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
> +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
> +#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
> +#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
> +#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
> +#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
> +#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
> +#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
> +#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */

(...)

> +/*
> + * For testing SSCR1 changes that require SSP restart, basically
> + * everything except the service and interrupt enables
> + */
> +
> +#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
> +                               | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
> +                               | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
> +                               | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
> +                               | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
> +                               | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
> +
> +struct callback_param {
> +       void *drv_context;
> +       u32 direction;
> +};
> +

Convert the inline documentation below to use kerneldoc.

> +struct ssp_driver_context {
> +       /* Driver model hookup */
> +       struct pci_dev *pdev;
> +
> +       /* SPI framework hookup */
> +       struct spi_master *master;
> +
> +       /* SSP register addresses */
> +       unsigned long paddr;
> +       void *ioaddr;
> +       int irq;
> +
> +       /* I2C registers */
> +       dma_addr_t I2C_paddr;
> +       void *I2C_ioaddr;

Skip the caps.

i2c_paddr, i2c_ioaddr is fine.

But I think "paddr" is a bad name because it probably spells
out "physical address", "daddr" is more to the point, because
dma address is not necessarily == physical address.

> +       /* SSP masks*/
> +       u32 cr1_sig;
> +       u32 cr1;
> +       u32 clear_sr;
> +       u32 mask_sr;
> +
> +       /* PM_QOS request */
> +       struct pm_qos_request pm_qos_req;
> +
> +       struct tasklet_struct poll_transfer;
> +
> +       spinlock_t lock;
> +
> +       /* Current message transfer state info */
> +       struct spi_message *cur_msg;
> +       size_t len;
> +       size_t len_dma_rx;
> +       size_t len_dma_tx;
> +       void *tx;
> +       void *tx_end;
> +       void *rx;
> +       void *rx_end;
> +       bool dma_initialized;
> +       int dma_mapped;
> +       dma_addr_t rx_dma;
> +       dma_addr_t tx_dma;
> +       u8 n_bytes;
> +       int (*write)(struct ssp_driver_context *drv_context);
> +       int (*read)(struct ssp_driver_context *drv_context);
> +
> +       struct intel_mid_dma_slave    dmas_tx;
> +       struct intel_mid_dma_slave    dmas_rx;
> +       struct dma_chan    *txchan;
> +       struct dma_chan    *rxchan;
> +       struct workqueue_struct *dma_wq;
> +       struct work_struct complete_work;
> +
> +       u8 __iomem *virt_addr_sram_tx;
> +       u8 __iomem *virt_addr_sram_rx;
> +
> +       int txdma_done;
> +       int rxdma_done;
> +       struct callback_param tx_param;
> +       struct callback_param rx_param;

With kerneldoc it's easier to tell what the usecase is for these
callbacks.

> +       struct pci_dev *dmac1;

It seems that something like a pci_dev * should be used
to refer to the I2C and SRAM as well?

> +
> +       unsigned long quirks;
> +       u32 rx_fifo_threshold;
> +};
> +
> +struct chip_data {
> +       u32 cr0;
> +       u32 cr1;
> +       u32 timeout;
> +       u8 n_bytes;
> +       u8 dma_enabled;

bool?

> +       u8 bits_per_word;
> +       u32 speed_hz;

Should that be u32? unsigned int seems more appropriate for a frequency.

> +       int (*write)(struct ssp_driver_context *drv_context);
> +       int (*read)(struct ssp_driver_context *drv_context);
> +};

kerneldoc me.
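Something along these lines (the descriptions are my guesses from the code, so
correct them as needed):

/**
 * struct chip_data - per chip-select configuration
 * @cr0: cached SSCR0 value for this device
 * @cr1: cached SSCR1 value for this device
 * @timeout: transfer timeout
 * @n_bytes: number of bytes per word (1, 2 or 4)
 * @dma_enabled: true if transfers for this device use DMA
 * @bits_per_word: word size in bits
 * @speed_hz: bus clock speed in Hz
 * @write: PIO write helper matching @bits_per_word
 * @read: PIO read helper matching @bits_per_word
 */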

> +enum intel_mid_ssp_spi_fifo_burst {
> +       IMSS_FIFO_BURST_1,
> +       IMSS_FIFO_BURST_4,
> +       IMSS_FIFO_BURST_8
> +};
> +
> +/* spi_board_info.controller_data for SPI slave devices,
> + * copied to spi_device.platform_data ... mostly for dma tuning
> + */
> +struct intel_mid_ssp_spi_chip {
> +       enum intel_mid_ssp_spi_fifo_burst burst_size;
> +       u32 timeout;
> +       u8 enable_loopback;
> +       u8 dma_enabled;

The last two entries look like they should be bool.

> +};

kerneldoc.

> +#define SPI_DIB_NAME_LEN  16
> +#define SPI_DIB_SPEC_INFO_LEN      10
> +
> +struct spi_dib_header {
> +       u32       signature;
> +       u32       length;
> +       u8         rev;
> +       u8         checksum;
> +       u8         dib[0];
> +} __packed;

Why is this packed?

Yours,
Linus Walleij


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]   ` <CACRpkdad3fHxWRpRqD-eP8-sKKexN+s-JZCT6XLggv92Q=5kMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2012-12-18  5:47     ` chao bi
  2012-12-20 15:32       ` Linus Walleij
  2013-01-09  4:25       ` Vinod Koul
  0 siblings, 2 replies; 26+ messages in thread
From: chao bi @ 2012-12-18  5:47 UTC (permalink / raw)
  To: Linus Walleij
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w, Vinod Koul,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, Dan Williams,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

Dear Linus,
Thanks for your kind comments. It seems you were reviewing the 1st version; I've
submitted the 2nd version and will deliver the 3rd version soon, and will include
you in the review.

Please see my comments inline below.

On Mon, 2012-12-17 at 12:23 +0100, Linus Walleij wrote:
> On Wed, Nov 21, 2012 at 3:16 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:
> 
> > +#ifdef DUMP_RX
> 
> So since this #define DUMP_RX is not part of this patch and not of the
> kernel at large it's basically an #if 0 and all the code within such
> defines should be deleted.
> 
> But I guess you have this undocumented feature that the developer
> is supposed to hack the file and insert #define DUMP_RX to use it.

Yes, so I will delete it in this patch, and the "DUMP" feature will be
submitted as another patch, following the method you suggested.

> > +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
> 
> Change return type to bool if you're just returning 0 or 1.
> 
> > +{
> > +       u32 sssr;
> > +       sssr = read_SSSR(drv_context->ioaddr);
> > +       if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> > +               return 0;
> > +       else
> > +               return 1;
> > +}
> 
> return false/true.
> 
> > +static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
> > +{
> > +       return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
> > +}
> 
> Dito. Here it is even more obvious.
> 

It's already done in the 2nd version.
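i.e. roughly like this (same logic as above, bool return):

static inline bool is_rx_fifo_empty(struct ssp_driver_context *drv_context)
{
	return (read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0;
}

static inline bool is_tx_fifo_empty(struct ssp_driver_context *drv_context)
{
	u32 sssr = read_SSSR(drv_context->ioaddr);

	return !(sssr & SSSR_TFL_MASK) && (sssr & SSSR_TNF);
}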

> > +static void flush(struct ssp_driver_context *drv_context)
> > +{
> > +       void *reg = drv_context->ioaddr;
> > +       u32 i = 0;
> > +
> > +       /* If the transmit fifo is not empty, reset the interface. */
> > +       if (!is_tx_fifo_empty(drv_context)) {
> > +               dev_err(&drv_context->pdev->dev,
> > +                               "TX FIFO not empty. Reset of SPI IF");
> > +               disable_interface(drv_context);
> > +               return;
> > +       }
> > +
> > +       dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
> > +       while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
> > +               read_SSDR(reg);
> > +               i++;
> > +       }
> > +       WARN(i > 0, "%d words flush occured\n", i);
> 
> WARN really? Why not dev_warn()?

The SPI FIFO is supposed to be empty after each transfer, so flush() is called
before each transfer; if any data remains in the SPI FIFO,
it shows some kind of error must have happened in the last transfer, which
deserves a WARN() to record it.

> > +static int null_writer(struct ssp_driver_context *drv_context)
> > +static int null_reader(struct ssp_driver_context *drv_context)
> > +static int u8_writer(struct ssp_driver_context *drv_context)
> > +static int u8_reader(struct ssp_driver_context *drv_context)
> > +static int u16_writer(struct ssp_driver_context *drv_context)
> > +static int u16_reader(struct ssp_driver_context *drv_context)
> > +static int u32_writer(struct ssp_driver_context *drv_context)
> > +static int u32_reader(struct ssp_driver_context *drv_context)
> 
> These seem to all be designed to return 0 or 1 and should then be
> bool. It seems strange actually, you would expect that such a
> function returns the number of bytes or words read/written.
> 

These functions are only used by PIO transfer; their names show how many
bytes are written/read, and they return whether the write/read succeeded.
Returning the number of bytes read/written seems to make no sense for PIO
transfer, so I think a return type of bool is enough, what do you think?

> > +/**
> > + * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
> > + * @drv_context:       Pointer to the private driver context
> > + */
> > +static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
> > +{
> > +       u32 length = drv_context->len;
> > +
> > +       if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> > +               && (drv_context->len > drv_context->rx_fifo_threshold *
> > +               drv_context->n_bytes))
> > +               length = TRUNCATE(drv_context->len,
> > +                       drv_context->rx_fifo_threshold * drv_context->n_bytes);
> 
> TRUNCATE is a too generic name but I'll leave that comment for
> the header file where it's defined.
> 
In version 2, most of the contents have been moved to the .c file to avoid
name conflicts.

> It looks very strange.
> 
> Isn't this simply an arithmetic soup construction to say:
> 
> length = drv_context->len / (drv_context->rx_fifo_threshold *
> drv_context->n_bytes);

I think TRUNCATE() is different:
#define TRUNCATE(x, a) ((x) & ~((a)-1))

> > +static void int_transfer_complete(struct ssp_driver_context *drv_context)
> > +{
> > +       void *reg = drv_context->ioaddr;
> > +       struct spi_message *msg;
> > +       struct device *dev = &drv_context->pdev->dev;
> > +
> > +       if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
> > +               pm_qos_update_request(&drv_context->pm_qos_req,
> > +                                       PM_QOS_DEFAULT_VALUE);
> 
> It's weird that using PM QoS is treated as an unlikely oddity.
> Should it not be the other way around?
> 
> > +
> > +       if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
> > +               sram_to_ddr_cpy(drv_context);
> > +
> > +       if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
> > +               drain_trail(drv_context);
> 
> Uusally likely() / unlikely() micro-optimization is discouraged,
> do you have specific performance numbers for using it so
> much here?
> 

I haven't done any sort of performance test for likely/unlikely, but per
our tests on the Medfield platform, it meets our performance requirements. By
the way, would you please explain why likely/unlikely is discouraged? If
it impacts performance, maybe we could consider proposing another
enhancement patch to optimize performance, but that should be based on
tests.

> > +       else
> > +               /* Stop getting Time Outs */
> > +               write_SSTO(0, reg);
> > +
> > +       drv_context->cur_msg->status = 0;
> > +       drv_context->cur_msg->actual_length = drv_context->len;
> > +
> > +#ifdef DUMP_RX
> > +       dump_trailer(dev, drv_context->rx, drv_context->len, 16);
> > +#endif
> 
> Atleast avoid doing these inlined #ifdefs please.
> 
> Define a stub up-there where it's defined instead:
> 
> #if DUMP_RX
> void dump_trailer()
> {
> ...
> }
> #else
> void static inline dump_trailer() {}
> #endif
> 
> The kernel already relies on the compiler to remove such
> code completely, so should be done here as well.
> 
> > +       dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
> > +       msg = drv_context->cur_msg;
> > +       if (likely(msg->complete))
> > +               msg->complete(msg->context);
> > +}
> 
> So this is duplicating the code in the bulk SPI code.
> 
> Please try to use the generic transfer queue, it's really nice.
> 
> > +static void int_transfer_complete_work(struct work_struct *work)
> > +{
> > +       struct ssp_driver_context *drv_context = container_of(work,
> > +                               struct ssp_driver_context, complete_work);
> > +
> > +       int_transfer_complete(drv_context);
> > +}
> 
> What does "int_" mean in the above function signature?
> 
> Repeatedly in the file says interrupt mode is not supported so it can't
> be "interrupt". "internal"?
> 
> Actaully it seems this is dead code. The only reference in the
> driver appears to be when an unused WQ us pointed at this
> function.
> 
> I think you have some clean-up to do if you don't support interrupt
> mode.
> 

It's changed in the new patch; the new patch will adopt the new SPI core
workqueue management. Please refer to the 3rd version patch, which I'll send
out later today.
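The direction is roughly this (a sketch only, the helper name is a placeholder):

static int intel_mid_ssp_spi_transfer_one_message(struct spi_master *master,
						  struct spi_message *msg)
{
	struct ssp_driver_context *drv_context = spi_master_get_devdata(master);
	int status;

	/* set up and run the transfers in msg->transfers here */
	status = handle_message(drv_context, msg);	/* placeholder */

	msg->status = status;
	spi_finalize_current_message(master);

	return status;
}

...
	master->transfer_one_message = intel_mid_ssp_spi_transfer_one_message;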

> 
> > +/**
> > + * ssp_int() - Interrupt handler
> > + * @irq
> > + * @dev_id
> > + *
> > + * The SSP interrupt is not used for transfer which are handled by
> > + * DMA or polling: only under/over run are catched to detect
> > + * broken transfers.
> > + */
> > +static irqreturn_t ssp_int(int irq, void *dev_id)
> > +{
> > +       struct ssp_driver_context *drv_context = dev_id;
> > +       void *reg = drv_context->ioaddr;
> > +       struct device *dev = &drv_context->pdev->dev;
> > +       u32 status = read_SSSR(reg);
> > +
> > +       /* It should never be our interrupt since SSP will */
> > +       /* only trigs interrupt for under/over run.        */
> 
> /*
>  * Squash to some nice multiline comment will you?
>  */

Sure.

> 
> > +       if (likely(!(status & drv_context->mask_sr)))
> > +               return IRQ_NONE;
> > +
> > +       if (status & SSSR_ROR || status & SSSR_TUR) {
> > +               dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n", status);
> > +               WARN_ON(1);
> > +               if (status & SSSR_ROR)
> > +                       dev_err(dev, "we have Overrun\n");
> > +               if (status & SSSR_TUR)
> > +                       dev_err(dev, "we have Underrun\n");
> > +       }
> > +
> > +       /* We can fall here when not using DMA mode */
> 
> fall? fail?
> 
I think it's "fall"

> > +       if (!drv_context->cur_msg) {
> > +               disable_interface(drv_context);
> > +               disable_triggers(drv_context);
> > +       }
> 
> So you only do something if you *don't* have any messages?
> 
> > +       /* clear status register */
> > +       write_SSSR(drv_context->clear_sr, reg);
> > +       return IRQ_HANDLED;
> > +}
> 
> I can't see how this error interrupt handler actually handles
> errors. Has this been tested? It only seems you print an error
> message and go on as usual.
> 
> You should tear down the ongoing transfer, set the msg->status
> to some error code and/or retransmit or something should you not?
> 
> Atleast put in a TODO so that maintainers of this driver
> know that there's something unhandled here.

When ROR/TUR happens, the transfer will not continue because it will never
receive the DMA complete interrupts, therefore the transfer loop is stopped.
But the error recovery mechanism is a TODO and will be validated and updated
in other patches; the recovery handler is not to be done in this interrupt
handler.

> 
> (...)
> > +/**
> > + * start_bitbanging() - Clock synchronization by bit banging
> > + * @drv_context:       Pointer to private driver context
> > + *
> > + * This clock synchronization will be removed as soon as it is
> > + * handled by the SCU.
> > + */
> > +static void start_bitbanging(struct ssp_driver_context *drv_context)
> > +{
> > +       u32 sssr;
> > +       u32 count = 0;
> > +       u32 cr0;
> > +       void *i2c_reg = drv_context->I2C_ioaddr;
> > +       struct device *dev = &drv_context->pdev->dev;
> > +       void *reg = drv_context->ioaddr;
> > +       struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
> > +       cr0 = chip->cr0;
> > +
> > +       dev_warn(dev, "In %s : Starting bit banging\n",\
> > +               __func__);
> > +       if (read_SSSR(reg) & SSP_NOT_SYNC)
> > +               dev_warn(dev, "SSP clock desynchronized.\n");
> > +       if (!(read_SSCR0(reg) & SSCR0_SSE))
> > +               dev_warn(dev, "in SSCR0, SSP disabled.\n");
> > +
> > +       dev_dbg(dev, "SSP not ready, start CLK sync\n");
> > +
> > +       write_SSCR0(cr0 & ~SSCR0_SSE, reg);
> > +       write_SSPSP(0x02010007, reg);
> 
> Aha 0x0201007.
> 
> Usually we define the bitfields, and actually you have:
> <snip from header file>
> +#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
> +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
> +#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
> +#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
> +#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
> +#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
> +#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
> +#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
> +#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
> <end>
> 
> Please use these bit specifiers to conjure the magic number instead.
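OK. If I decode 0x02010007 correctly against those macros, it would read as
follows (to be double-checked against the spec):

write_SSPSP(SSPSP_FSRT | SSPSP_SFRMWDTH(1) | SSPSP_SFRMP | SSPSP_SCMODE(3), reg);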
> 
> (...)
> > +       write_I2CDATA(0x3, i2c_reg);
> > +       udelay(I2C_ACCESS_USDELAY);
> > +       write_I2CCTRL(0x01070034, i2c_reg);
> > +       udelay(I2C_ACCESS_USDELAY);
> > +       write_I2CDATA(0x00000099, i2c_reg);
> > +       udelay(I2C_ACCESS_USDELAY);
> > +       write_I2CCTRL(0x01070038, i2c_reg);
> > +       udelay(I2C_ACCESS_USDELAY);
> 
> Dito.
> 
> > +       /* Bit bang the clock until CSS clears */
> > +       while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
> > +               write_I2CDATA(0x2, i2c_reg);
> > +               udelay(I2C_ACCESS_USDELAY);
> > +               write_I2CCTRL(0x01070034, i2c_reg);
> > +               udelay(I2C_ACCESS_USDELAY);
> > +               write_I2CDATA(0x3, i2c_reg);
> > +               udelay(I2C_ACCESS_USDELAY);
> > +               write_I2CCTRL(0x01070034, i2c_reg);
> > +               udelay(I2C_ACCESS_USDELAY);
> 
> 
> Dito.
> 
> > +       if (count >= MAX_BITBANGING_LOOP)
> > +               dev_err(dev,
> > +                       "ERROR in %s : infinite loop on bit banging. Aborting\n",
> > +                       __func__);
> > +
> > +       dev_dbg(dev, "---Bit bang count=%d\n", count);
> > +
> > +       write_I2CDATA(0x0, i2c_reg);
> > +       udelay(I2C_ACCESS_USDELAY);
> > +       write_I2CCTRL(0x01070038, i2c_reg);
> 
> Dito.

> > +static unsigned int ssp_get_clk_div(int speed)
> > +{
> > +       return max(100000000 / speed, 4) - 1;
> > +}
> 
> This was nice to see, good use of the max() operator!
> 
> (...)
> > +static int transfer(struct spi_device *spi, struct spi_message *msg)
> 
> This is duplicating the core message transfer queue.
> 
> Refactor this code to use the new infrastructure.
> 
> (...)

It's updated.

> > +static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
> > +       const struct pci_device_id *ent)
> > +{
> > +       struct device *dev = &pdev->dev;
> > +       struct spi_master *master;
> > +       struct ssp_driver_context *drv_context = 0;
> > +       int status;
> > +       u32 iolen = 0;
> > +       u8 ssp_cfg;
> > +       int pos;
> > +       void __iomem *syscfg_ioaddr;
> > +       unsigned long syscfg;
> > +
> > +       /* Check if the SSP we are probed for has been allocated */
> > +       /* to operate as SPI. This information is retreived from */
> > +       /* the field adid of the Vendor-Specific PCI capability  */
> > +       /* which is used as a configuration register.            */
> 
> /*
>  * Convert to multiline comment
>  */

Yes..

> 
> > +       if (drv_context->quirks & QUIRKS_BIT_BANGING) {
> > +               /* Bit banging on the clock is done through */
> > +               /* DFT which is available through I2C.      */
> > +               /* get base address of I2C_Serbus registers */
> > +               drv_context->I2C_paddr = 0xff12b000;
> 
> What on earth is this?
> 
> Note the comment says "get base address", you're not getting it at
> all, you're hardcoding it. Resources like this should be passed in from
> the outside.
> 

OK, the address is fixed for the current platform. I'll define the address
at the beginning and here just refer to the macro; for another platform, this
address should be defined with a different value.

> What will happen in the next platform when some ASIC engineer
> decide to move this some pages ahead?
> 
> Don't you have some platform data/device tree/ACPI table or
> whatever where this is supposed to be stored?
> 
> Looks like a criss-cross dependency to some I2C block, and
> as such it deserves a big fat comment about the weirdness
> going on here.
> 
> > +               drv_context->I2C_ioaddr =
> > +                       ioremap_nocache(drv_context->I2C_paddr, 0x10);
> 
> Like the size of that ioregion.
> 
> And use devm_ioremap_nocache() to utilize managed resources.
> 
> > +               if (!drv_context->I2C_ioaddr) {
> > +                       status = -ENOMEM;
> > +                       goto err_free_3;
> > +               }
> > +       }
> > +
> > +       /* Attach to IRQ */
> > +       drv_context->irq = pdev->irq;
> > +       status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
> > +               "intel_mid_ssp_spi", drv_context);
> 
> Use managed resources throughout:
> devm_request_irq() in this case.
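OK, something like this untested sketch (keeping the existing error labels for
the non-managed parts of probe):

	drv_context->ioaddr = devm_ioremap_nocache(&pdev->dev,
						   drv_context->paddr, iolen);
	if (!drv_context->ioaddr) {
		status = -ENOMEM;
		goto err_free_2;
	}

	status = devm_request_irq(&pdev->dev, pdev->irq, ssp_int, IRQF_SHARED,
				  "intel_mid_ssp_spi", drv_context);
	if (status < 0)
		goto err_free_2;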


> 
> > +       if (status < 0) {
> > +               dev_err(&pdev->dev, "can not get IRQ\n");
> > +               goto err_free_4;
> > +       }
> > +
> > +       if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
> > +               /* get base address of DMA selector. */
> > +               syscfg = drv_context->paddr - SYSCFG;
> > +               syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
> 
> devm_ioremap_nocache()
> 
> > +               if (!syscfg_ioaddr) {
> > +                       status = -ENOMEM;
> > +                       goto err_free_5;
> > +               }
> > +               iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
> > +       }
> > +
> > +       tasklet_init(&drv_context->poll_transfer, poll_transfer,
> > +               (unsigned long)drv_context);
> 
> I think this tasklet can be removed and you can have the SPI core
> message queue drive the transfers. But prove me wrong.
> 

Agree with you; please view the 3rd patch.

> > +       /* Register with the SPI framework */
> > +       dev_info(dev, "register with SPI framework (bus spi%d)\n",
> > +               master->bus_num);
> > +
> > +       status = spi_register_master(master);
> > +
> > +       if (status != 0) {
> > +               dev_err(dev, "problem registering spi\n");
> > +               goto err_free_5;
> > +       }
> > +
> > +       pci_set_drvdata(pdev, drv_context);
> > +
> > +       /* Create the PM_QOS request */
> > +       if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> > +               pm_qos_add_request(&drv_context->pm_qos_req,
> > +               PM_QOS_CPU_DMA_LATENCY,
> > +               PM_QOS_DEFAULT_VALUE);
> > +
> > +       return status;
> > +
> > +err_free_5:
> > +       free_irq(drv_context->irq, drv_context);
> > +err_free_4:
> > +       iounmap(drv_context->I2C_ioaddr);
> > +err_free_3:
> > +       iounmap(drv_context->ioaddr);
> 
> These three go away with managed devm_* resources.
> 
> > +err_free_2:
> > +       pci_release_region(pdev, 0);
> > +err_free_1:
> > +       spi_master_put(master);
> > +err_free_0:
> > +       pci_disable_device(pdev);
> > +
> > +       return status;
> > +err_abort_probe:
> > +       dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
> > +               pdev->vendor, pdev->device);
> > +       return -ENODEV;
> > +}
> 
> (...)
> > +static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
> > +{
> > +       struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
> > +
> > +       if (!drv_context)
> > +               return;
> > +
> > +       /* Release IRQ */
> > +       free_irq(drv_context->irq, drv_context);
> > +
> > +       iounmap(drv_context->ioaddr);
> > +       if (drv_context->quirks & QUIRKS_BIT_BANGING)
> > +               iounmap(drv_context->I2C_ioaddr);
> 
> These also go away with devm_*

OK..

> 
> > +
> > +       /* disconnect from the SPI framework */
> > +       spi_unregister_master(drv_context->master);
> > +
> > +       pci_set_drvdata(pdev, NULL);
> > +       pci_release_region(pdev, 0);
> > +       pci_disable_device(pdev);
> > +
> > +       return;
> > +}
> > +
> > +#ifdef CONFIG_PM
> > +/**
> > + * intel_mid_ssp_spi_suspend() - Driver suspend procedure
> > + * @pdev:      Pointer to the pci_dev struct
> > + * @state:     pm_message_t
> > + */
> > +static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
> > +{
> > +       struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
> > +       dev_dbg(&pdev->dev, "suspend\n");
> > +
> > +       tasklet_disable(&drv_context->poll_transfer);
> > +
> > +       return 0;
> > +}
> 
> When using the central message queue you probably just call
> spi_master_suspend()
> spi_master_resume()
> 
> here and the framework takes care of the message queue.

PM-related processing is not implemented in this patch; it's a TODO following
this patch, so let's handle it together in that patch.

> 
> (...)
> > +++ b/include/linux/spi/spi-intel-mid-ssp.h
> > @@ -0,0 +1,326 @@
> > +/*
> > + *  Copyright (C) Intel 2009
> > + *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > + *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
> > + *
> > + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> 
> Cut these nice wallpapers... OK no big deal maybe.
> 
> (...)
> 
> For the following review comments, begin with adding:
> 
> #include <linux/bitops.h>
> 
> So you get the BIT() macro and some more.
> 
> > +#define SSP_NOT_SYNC 0x400000
> 
> Then you can write:
> 
> #define SSP_NOT_SYNC BIT(22)
> 
> Which tell us what this is actually about. A flag in bit 22.

That's nice, thank you.
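So, just to confirm I got it, SSP_NOT_SYNC and the quirk flags further down
would become something like:

#include <linux/bitops.h>

#define SSP_NOT_SYNC                   BIT(22)

#define QUIRKS_SRAM_ADDITIONAL_CPY     BIT(0)
#define QUIRKS_DMA_USE_NO_TRAIL        BIT(1)
#define QUIRKS_USE_PM_QOS              BIT(2)
#define QUIRKS_PLATFORM_MRST           BIT(3)
#define QUIRKS_PLATFORM_MDFL           BIT(4)
#define QUIRKS_BIT_BANGING             BIT(5)
#define QUIRKS_SPI_SLAVE_CLOCK_MODE    BIT(6)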

> 
> > +#define MAX_SPI_TRANSFER_SIZE 8192
> > +#define MAX_BITBANGING_LOOP   10000
> > +#define SPI_FIFO_SIZE 16
> > +
> > +/* PM QoS define */
> > +#define MIN_EXIT_LATENCY 20
> 
> State unit. milliseconds I think?

usec..

> 
> > +/* SSP assignement configuration from PCI config */
> > +#define SSP_CFG_GET_MODE(ssp_cfg)      ((ssp_cfg) & 0x07)
> > +#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)        (((ssp_cfg) >> 3) & 0x07)
> > +#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)  ((ssp_cfg) & 0x40)
> 
> & BIT(6)
> 
> > +#define SSP_CFG_SPI_MODE_ID            1
> > +/* adid field offset is 6 inside the vendor specific capability */
> > +#define VNDR_CAPABILITY_ADID_OFFSET    6
> > +
> > +/* Driver's quirk flags */
> > +/* This workarround bufferizes data in the audio fabric SDRAM from  */
> > +/* where the DMA transfers will operate. Should be enabled only for */
> > +/* SPI slave mode.                                                  */
> > +#define QUIRKS_SRAM_ADDITIONAL_CPY     1
> 
> BIT(0)
> 
> > +/* If set the trailing bytes won't be handled by the DMA.           */
> > +/* Trailing byte feature not fully available.                       */
> > +#define QUIRKS_DMA_USE_NO_TRAIL                2
> 
> BIT(1)
> 
> > +/* If set, the driver will use PM_QOS to reduce the latency         */
> > +/* introduced by the deeper C-states which may produce over/under   */
> > +/* run issues. Must be used in slave mode. In master mode, the      */
> > +/* latency is not critical, but setting this workarround  may       */
> > +/* improve the SPI throughput.                                      */
> > +#define QUIRKS_USE_PM_QOS              4
> 
> BIT(2)
> 
> > +/* This quirks is set on Moorestown                                 */
> > +#define QUIRKS_PLATFORM_MRST           8
> 
> BIT(3)
> 
> > +/* This quirks is set on Medfield                                   */
> > +#define QUIRKS_PLATFORM_MDFL           16
> 
> BIT(4)
> 
> > +/* If set, the driver will apply the bitbanging workarround needed  */
> > +/* to enable defective Langwell stepping A SSP. The defective SSP   */
> > +/* can be enabled only once, and should never be disabled.          */
> > +#define QUIRKS_BIT_BANGING             32
> 
> BIT(5)
> 
> > +/* If set, SPI is in slave clock mode                               */
> > +#define QUIRKS_SPI_SLAVE_CLOCK_MODE    64
> 
> BIT(6)
> 
> > +/* Uncomment to get RX and TX short dumps after each transfer */
> > +/* #define DUMP_RX 1 */
> 
> As mentioned in the main file, convert to a Kconfig verbose config option.
> 
> > +#define MAX_TRAILING_BYTE_RETRY 16
> > +#define MAX_TRAILING_BYTE_LOOP 100
> 
> Max iterations?
> 
> > +#define DELAY_TO_GET_A_WORD 3
> > +#define DFLT_TIMEOUT_VAL 500
> 
> milliseconds?
> 

It depends on peripheral clock frequency.

> > +#define DEFINE_SSP_REG(reg, off) \
> > +static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
> > +static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
> 
> But the other code is using io-accessors, so what about using
> ioread32()/iowrite32() instead?
> 
> In any case readl_relaxed() and writel_relaxed() would be preferable
> to this I think?
> 
> > +#define RX_DIRECTION 0
> > +#define TX_DIRECTION 1
> > +
> > +#define I2C_ACCESS_USDELAY 10
> > +
> > +#define DFLT_BITS_PER_WORD 16
> > +#define MIN_BITS_PER_WORD     4
> > +#define MAX_BITS_PER_WORD     32
> > +#define DFLT_FIFO_BURST_SIZE   IMSS_FIFO_BURST_8
> > +
> > +#define TRUNCATE(x, a) ((x) & ~((a)-1))
> 
> Too generic name. And what it does is actually mask the (a) upper
> bits so it's misleading too.
> 
> I'm confused over this macro, as per comments in the code, and
> suspect it should be removed in favor of integer division.
> 
> If you have to keep this then at least rewrite it using a
> static inline.
> 
> > +DEFINE_SSP_REG(SSCR0, 0x00)
> > +DEFINE_SSP_REG(SSCR1, 0x04)
> > +DEFINE_SSP_REG(SSSR, 0x08)
> > +DEFINE_SSP_REG(SSITR, 0x0c)
> > +DEFINE_SSP_REG(SSDR, 0x10)
> > +DEFINE_SSP_REG(SSTO, 0x28)
> > +DEFINE_SSP_REG(SSPSP, 0x2c)
> > +
> > +DEFINE_SSP_REG(I2CCTRL, 0x00);
> > +DEFINE_SSP_REG(I2CDATA, 0x04);
> > +
> > +DEFINE_SSP_REG(GPLR1, 0x04);
> > +DEFINE_SSP_REG(GPDR1, 0x0c);
> > +DEFINE_SSP_REG(GPSR1, 0x14);
> > +DEFINE_SSP_REG(GPCR1, 0x1C);
> > +DEFINE_SSP_REG(GAFR1_U, 0x44);
> > +
> > +#define SYSCFG  0x20bc0
> 
> Which means?
> 
> > +#define SRAM_BASE_ADDR 0xfffdc000
> 
> Should be passed as a resource, see the above reasoning for the
> "I2C" base address. What happens on the next ASIC spin when
> the engineer moves this base offset etc., don't you have any
> system discovery?
This is a fixed value for the Moorestown & Medfield platforms, as
declared in the file header. If the hardware changes, the address should
be changed accordingly.
> 
> > +#define SRAM_RX_ADDR   SRAM_BASE_ADDR
> > +#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
> > +
> > +#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
> > +#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
> 
> No lowercase in macros at all please.

Yes.

> 
> SSCR0_DATASIZE() is fine.
> 
> > +#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
> > +#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
> 
> Dito.
> 
> And this is a very funny way to define the integer "0".
> 
> I understand the intent but...

Nod..

> 
> > +#define SSCR0_ECS   (1 << 6) /* External clock select */
> 
> BIT(6)
> 
> > +#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
> 
> BIT(7)
> 
> > +
> > +#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
> > +#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
> 
> Uppercase.
> 
> > +#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
> > +#define SSCR0_NCS   (1 << 21)           /* Network clock select */
> > +#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrrun int mask */
> > +#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
> 
> BIT(20), BIT(21) ...
> 
> > +#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
> > +#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
> 
> Uppercase.
> 
> > +#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
> > +#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
> 
> BIT(30), BIT(31)
> 
> > +#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
> > +#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
> > +#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
> > +#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
> > +#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
> > +#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
> > +#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
> > +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
> > +#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
> > +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
> 
> BIT(0), BIT(1)...
> 
> > +#define SSSR_TNF               (1 << 2)        /* Tx FIFO Not Full */
> > +#define SSSR_RNE               (1 << 3)        /* Rx FIFO Not Empty */
> > +#define SSSR_BSY               (1 << 4)        /* SSP Busy */
> > +#define SSSR_TFS               (1 << 5)        /* Tx FIFO Service Request */
> > +#define SSSR_RFS               (1 << 6)        /* Rx FIFO Service Request */
> > +#define SSSR_ROR               (1 << 7)        /* Rx FIFO Overrun */
> 
> You know the drill.
> 
> > +#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
> > +
> > +#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
> > +#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
> > +#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
> > +#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
> > +
> > +#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
> > +#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
> > +#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
> > +#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
> > +#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
> > +#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
> > +#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
> > +#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
> > +#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
> > +#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
> > +#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
> > +#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
> > +#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
> > +#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
> > +#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
> > +#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interupt Enable */
> > +#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
> > +#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
> > +#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
> > +
> > +#define SSSR_BCE         (1 << 23) /* Bit Count Error */
> > +#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
> > +#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
> > +#define SSSR_EOC         (1 << 20) /* End Of Chain */
> > +#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
> > +#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
> 
> Use BIT() macro throughout.
> 
> > +#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
> > +#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
> > +#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
> > +#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
> > +#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
> > +#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
> > +#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
> > +#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
> > +#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
> 
> (...)
> 
> > +/*
> > + * For testing SSCR1 changes that require SSP restart, basically
> > + * everything except the service and interrupt enables
> > + */
> > +
> > +#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
> > +                               | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
> > +                               | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
> > +                               | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
> > +                               | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
> > +                               | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
> > +
> > +struct callback_param {
> > +       void *drv_context;
> > +       u32 direction;
> > +};
> > +
> 
> Convert the inline documentation below to use kerneldoc.
> 
> > +struct ssp_driver_context {
> > +       /* Driver model hookup */
> > +       struct pci_dev *pdev;
> > +
> > +       /* SPI framework hookup */
> > +       struct spi_master *master;
> > +
> > +       /* SSP register addresses */
> > +       unsigned long paddr;
> > +       void *ioaddr;
> > +       int irq;
> > +
> > +       /* I2C registers */
> > +       dma_addr_t I2C_paddr;
> > +       void *I2C_ioaddr;
> 
> Skip the caps.
> 
> i2c_paddr, i2c_ioaddr is fine.
> 
> But I think "paddr" is a bad name because it probably spells
> out "physical address", "daddr" is more to the point, because
> dma address is not necessarily == physical address.
> 
Yes..

> > +       /* SSP masks*/
> > +       u32 cr1_sig;
> > +       u32 cr1;
> > +       u32 clear_sr;
> > +       u32 mask_sr;
> > +
> > +       /* PM_QOS request */
> > +       struct pm_qos_request pm_qos_req;
> > +
> > +       struct tasklet_struct poll_transfer;
> > +
> > +       spinlock_t lock;
> > +
> > +       /* Current message transfer state info */
> > +       struct spi_message *cur_msg;
> > +       size_t len;
> > +       size_t len_dma_rx;
> > +       size_t len_dma_tx;
> > +       void *tx;
> > +       void *tx_end;
> > +       void *rx;
> > +       void *rx_end;
> > +       bool dma_initialized;
> > +       int dma_mapped;
> > +       dma_addr_t rx_dma;
> > +       dma_addr_t tx_dma;
> > +       u8 n_bytes;
> > +       int (*write)(struct ssp_driver_context *drv_context);
> > +       int (*read)(struct ssp_driver_context *drv_context);
> > +
> > +       struct intel_mid_dma_slave    dmas_tx;
> > +       struct intel_mid_dma_slave    dmas_rx;
> > +       struct dma_chan    *txchan;
> > +       struct dma_chan    *rxchan;
> > +       struct workqueue_struct *dma_wq;
> > +       struct work_struct complete_work;
> > +
> > +       u8 __iomem *virt_addr_sram_tx;
> > +       u8 __iomem *virt_addr_sram_rx;
> > +
> > +       int txdma_done;
> > +       int rxdma_done;
> > +       struct callback_param tx_param;
> > +       struct callback_param rx_param;
> 
> With kerneldoc it's easier to tell what the usecase is for these
> callbacks.
> 
> > +       struct pci_dev *dmac1;
> 
> It seems that something like a pci_dev * should be used
> to refer to the I2C and SRAM as well?
> 
> > +
> > +       unsigned long quirks;
> > +       u32 rx_fifo_threshold;
> > +};
> > +
> > +struct chip_data {
> > +       u32 cr0;
> > +       u32 cr1;
> > +       u32 timeout;
> > +       u8 n_bytes;
> > +       u8 dma_enabled;
> 
> bool?
> 
Yes.
> > +       u8 bits_per_word;
> > +       u32 speed_hz;
> 
> Should that be u32? unsigned int seems more appropriate for a frequency.
> 
> > +       int (*write)(struct ssp_driver_context *drv_context);
> > +       int (*read)(struct ssp_driver_context *drv_context);
> > +};
> 
> kerneldoc me.
> 
> > +enum intel_mid_ssp_spi_fifo_burst {
> > +       IMSS_FIFO_BURST_1,
> > +       IMSS_FIFO_BURST_4,
> > +       IMSS_FIFO_BURST_8
> > +};
> > +
> > +/* spi_board_info.controller_data for SPI slave devices,
> > + * copied to spi_device.platform_data ... mostly for dma tuning
> > + */
> > +struct intel_mid_ssp_spi_chip {
> > +       enum intel_mid_ssp_spi_fifo_burst burst_size;
> > +       u32 timeout;
> > +       u8 enable_loopback;
> > +       u8 dma_enabled;
> 
> The last two entries look like they should be bool.
> 
Yes.
> > +};
> 
> kerneldoc.
> 
> > +#define SPI_DIB_NAME_LEN  16
> > +#define SPI_DIB_SPEC_INFO_LEN      10
> > +
> > +struct spi_dib_header {
> > +       u32       signature;
> > +       u32       length;
> > +       u8         rev;
> > +       u8         checksum;
> > +       u8         dib[0];
> > +} __packed;
> 
> Why is this packed?
> 
It's unused, so I'll delete it in a later version.
> Yours,
> Linus Walleij




^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-18  5:47     ` chao bi
@ 2012-12-20 15:32       ` Linus Walleij
  2013-01-09  4:25       ` Vinod Koul
  1 sibling, 0 replies; 26+ messages in thread
From: Linus Walleij @ 2012-12-20 15:32 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w, Vinod Koul,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, Dan Williams,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-VuQAYsv1563Yd54FQh9/CA

On Tue, Dec 18, 2012 at 6:47 AM, chao bi <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> Thanks for your kind comments. It seems you were reviewing the 1st version; I've
> submitted the 2nd version and will deliver the 3rd version soon, and will include
> you in the review.

OK I'll look over it as it comes along...

>> > +       WARN(i > 0, "%d words flush occured\n", i);
>>
>> WARN really? Why not dev_warn()?
>
> The SPI FIFO is supposed to be empty after each transfer, so flush() is called
> before each transfer; if any data remains in the SPI FIFO,
> it shows some kind of error must have happened in the last transfer, which
> deserves a WARN() to record it.

dev_warn() is recorded as well, I don't get it.

>> > +static int null_writer(struct ssp_driver_context *drv_context)
>> > +static int null_reader(struct ssp_driver_context *drv_context)
>> > +static int u8_writer(struct ssp_driver_context *drv_context)
>> > +static int u8_reader(struct ssp_driver_context *drv_context)
>> > +static int u16_writer(struct ssp_driver_context *drv_context)
>> > +static int u16_reader(struct ssp_driver_context *drv_context)
>> > +static int u32_writer(struct ssp_driver_context *drv_context)
>> > +static int u32_reader(struct ssp_driver_context *drv_context)
>>
>> These seem to all be designed to return 0 or 1 and should then be
>> bool. It seems strange actually, you would expect that such a
>> function returns the number of bytes or words read/written.
>
> These functions are only used by PIO transfer; their names show how many
> bytes are written/read, and they return whether the write/read succeeded.
> Returning the number of bytes read/written seems to make no sense for PIO
> transfer, so I think a return type of bool is enough, what do you think?

If the return value is success/error it should return zero for
success and < 0 for error. IIRC these returned 1 on error which
is then wrong. Keep them as int but return something negative
when the function fails please.
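E.g. a rough sketch of what I mean for the u8 case (untested, and the call
sites would then check for a negative return instead of 0/1):

static int u8_writer(struct ssp_driver_context *drv_context)
{
	void *reg = drv_context->ioaddr;

	if (drv_context->tx == drv_context->tx_end)
		return 0;	/* nothing left to send */

	if (!(read_SSSR(reg) & SSSR_TNF))
		return -EAGAIN;	/* TX FIFO full, caller retries */

	write_SSDR(*(u8 *)drv_context->tx, reg);
	drv_context->tx = (u8 *)drv_context->tx + 1;

	return 0;
}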

>> > +               length = TRUNCATE(drv_context->len,
>> > +                       drv_context->rx_fifo_threshold * drv_context->n_bytes);
(...)
>> It looks very strange.
>>
>> Isn't this simply an arithmetic soup construction to say:
>>
>> length = drv_context->len / (drv_context->rx_fifo_threshold *
>> drv_context->n_bytes);
>
> I think TRUNCATE() is different:
> #define TRUNCATE(x, a) ((x) & ~((a)-1))

Yes I got it all wrong. I have some problem with this
because of the name of the function I think.

Example for 32bit arithmetic:

TRUNCATE(0x1010, 0x100) ==
(0x00001010 & ~(0x00000100-1)) ==
(0x00001010 & ~(0x000000FF)) ==
(0x00001010 & 0xFFFFFF00) ==
0x00001000

It's quite unintuitive to call something that preserves the
upper bits and discards the lower bits "truncate";
usually we use that word for things like removing
leading zeroes, like writing 0x10 instead of 0x00000010.

Should it not be named HIGH_N_BITS()
or something like that?

I have considered adding this to <linux/bitops.h>:
#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))

So in this case you would instead of

y = TRUNCATE(x, a);

Use something like:

y = x & BITS(a, 31);

And in that case it's also obvious what happens (IMO)
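Just to sanity-check the equivalence for the power-of-two case used in the
driver (my arithmetic, please double check):

/* with x = 0x1234 and an alignment of 16, i.e. bit 4: */
TRUNCATE(0x1234, 16)  /* = 0x1234 & ~0xF        = 0x1230 */
0x1234 & BITS(4, 31)  /* = 0x1234 & 0xFFFFFFF0  = 0x1230 */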

>> Uusally likely() / unlikely() micro-optimization is discouraged,
>> do you have specific performance numbers for using it so
>> much here?
>
> I haven't done any sort of performance test for likely/unlikely, but per
> our tests on the Medfield platform, it meets our performance requirements. By
> the way, would you please explain why likely/unlikely is discouraged? If
> it impacts performance, maybe we could consider proposing another
> enhancement patch to optimize performance, but that should be based on
> tests.

Please consult the following:
http://lwn.net/Articles/420019/
http://lwn.net/Articles/70476/

Quoting Rusty Russell:

"Sometimes, unlikely()/likely() help code readability.  But generally it
should be considered the register keyword of the 2000's: if the case isn't
ABSOLUTELY CRYSTAL CLEAR, or doesn't show up on benchmarks, distain is
appropriate."

>> > +       /* We can fall here when not using DMA mode */
>>
>> fall? fail?
>>
> I think it's "fall"

Do you mean "fall through"?

>> > +       if (drv_context->quirks & QUIRKS_BIT_BANGING) {
>> > +               /* Bit banging on the clock is done through */
>> > +               /* DFT which is available through I2C.      */
>> > +               /* get base address of I2C_Serbus registers */
>> > +               drv_context->I2C_paddr = 0xff12b000;
>>
>> What on earth is this?
>>
>> Note the comment says "get base address", you're not getting it at
>> all, you're hardcoding it. Resources like this should be passed in from
>> the outside.
>>
>
> OK, the address is fixed for the current platform. I'll define the address
> at the beginning and here just refer to the macro; for another platform, this
> address should be defined with a different value.

That really does not answer the question why it is not passed as
a resource from the outside.

>> > +#define MAX_TRAILING_BYTE_RETRY 16
>> > +#define MAX_TRAILING_BYTE_LOOP 100
>>
>> Max iterations?
>>
>> > +#define DELAY_TO_GET_A_WORD 3
>> > +#define DFLT_TIMEOUT_VAL 500
>>
>> milliseconds?
>
> It depends on peripheral clock frequency.

So then write in a comment that it's the number of clock cycles
or something?

>> > +#define SRAM_BASE_ADDR 0xfffdc000
>>
>> Should be passed as a resource, see the above reasoning for the
>> "I2C" base address. What happens on the next ASIC spin when
>> the engineer moves this base offset etc., don't you have any
>> system discovery?
>
> This is a fixed value for the Moorestown & Medfield platforms, as
> declared in the file header. If the hardware changes, the address should
> be changed accordingly.

I don't get it. That's not how we usually do this kind of thing.
We usually pass it as a resource. Don't the Moorestown/Medfield
devices have a central resource registry of any kind?

We've got all kinds of crap for encoding things into the ARM platforms
like this, so let's not repeat that mistake.

Yours,
Linus Walleij


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
  2012-12-18  5:47     ` chao bi
  2012-12-20 15:32       ` Linus Walleij
@ 2013-01-09  4:25       ` Vinod Koul
       [not found]         ` <20130109042535.GL19691-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
  1 sibling, 1 reply; 26+ messages in thread
From: Vinod Koul @ 2013-01-09  4:25 UTC (permalink / raw)
  To: chao bi
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w, Linus Walleij,
	Dan Williams, spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	alan-b5Z7lJ3WibVrdx17CPfAsdBPR1lH4CV8

On Tue, Dec 18, 2012 at 01:47:40PM +0800, chao bi wrote:
> Dear Linus,
> Thanks for your kind comments. It seems you were reviewing the 1st version; I've
> submitted the 2nd version and will deliver the 3rd version soon, and will include
> you in the review.
Was the third version posted?

Also I have some questions on this approach. Is this driver for the SSP IP or the
SPI IP? It looks like the latter. In both cases there are some existing drivers in
the kernel and adding one more IMHO doesn't make sense. What we really need is a
common core for the dw IP and the SSP IP (I think pxa uses the same stuff). That
way a lot of code will be removed from the driver.
> > 
> > > +#define SRAM_BASE_ADDR 0xfffdc000
> > 
> > Should be passed as a resource, see the above reasoning for the
> > "I2C" base address. What happens on the next ASIC spin when
> > the engineer moves this base offset etc., don't you have any
> > system discovery?
> This is a fixed value for the Moorestown & Medfield platforms, as
> declared in the file header. If the hardware changes, the address should
> be changed accordingly.
Why would you want to change this later? Please add it as a resource, or since this
is a PCI device you can use the PCI table driver data.

Also, why would the SSP care about SRAM? I am not sure I follow it.
Lastly, if you have dedicated SRAM for your use, it should be in a PCI BAR and not
hard coded like this!!
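E.g. a rough sketch with driver_data (the struct and the ID macro are only
placeholders, the addresses are the ones currently hard coded):

struct intel_mid_ssp_platform_info {
	unsigned long sram_base;	/* better yet, expose it via a BAR */
	unsigned long i2c_base;
};

static const struct intel_mid_ssp_platform_info mrst_ssp_info = {
	.sram_base = 0xfffdc000,
	.i2c_base  = 0xff12b000,
};

static const struct pci_device_id intel_mid_ssp_ids[] = {
	/* SSP_PCI_DEVICE_ID stands in for the real device ID */
	{ PCI_VDEVICE(INTEL, SSP_PCI_DEVICE_ID),
	  (kernel_ulong_t)&mrst_ssp_info },
	{ },
};

/* and in probe: */
	const struct intel_mid_ssp_platform_info *info =
		(const void *)ent->driver_data;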

--
~Vinod


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH] SPI: SSP SPI Controller driver
       [not found]         ` <20130109042535.GL19691-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
@ 2013-01-10 11:52           ` Linus Walleij
  0 siblings, 0 replies; 26+ messages in thread
From: Linus Walleij @ 2013-01-10 11:52 UTC (permalink / raw)
  To: Vinod Koul
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w,
	alan-b5Z7lJ3WibVrdx17CPfAsdBPR1lH4CV8, chao bi, Dan Williams,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f

On Wed, Jan 9, 2013 at 5:25 AM, Vinod Koul <vinod.koul-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> wrote:

> Also I have some questions on this approach. Is this driver for the SSP IP or the
> SPI IP? It looks like the latter. In both cases there are some existing drivers in
> the kernel and adding one more IMHO doesn't make sense. What we really need is a
> common core for the dw IP and the SSP IP (I think pxa uses the same stuff). That
> way a lot of code will be removed from the driver.

+1 on this comment, I didn't even notice :-(

Linus Walleij


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH] SPI: SSP SPI Controller driver
@ 2012-11-06  9:11 chao bi
  0 siblings, 0 replies; 26+ messages in thread
From: chao bi @ 2012-11-06  9:11 UTC (permalink / raw)
  To: grant.likely-s3s/WqlpOiPyB63q8FvJNQ
  Cc: jun.d.chen-ral2JQCrhuEAvxtiuMwx3w,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
	ken.k.mills-ral2JQCrhuEAvxtiuMwx3w,
	sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w


This patch implements the SSP SPI controller driver, which has been applied and
validated on the Intel Moorestown & Medfield platforms. The patch was originated by
Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>,
and further developed by Channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> and Chen Jun
<jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> according to their integration & validation on the Medfield platform.

Signed-off-by: Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: channing <chao.bi-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Signed-off-by: Chen Jun <jun.d.chen-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/spi/Kconfig                   |    9 +
 drivers/spi/Makefile                  |    1 +
 drivers/spi/spi-intel-mid-ssp.c       | 1428 +++++++++++++++++++++++++++++++++
 include/linux/spi/spi-intel-mid-ssp.h |  326 ++++++++
 4 files changed, 1764 insertions(+), 0 deletions(-)
 create mode 100644 drivers/spi/spi-intel-mid-ssp.c
 create mode 100644 include/linux/spi/spi-intel-mid-ssp.h

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae35..0c3a559 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -179,6 +179,15 @@ config SPI_IMX
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
+	depends on SPI_MASTER && INTEL_MID_DMAC && EXPERIMENTAL
+	help
+	  This is the unified SSP SPI master controller driver for
+	  the Intel MID platforms, handling Moorestown & Medfield,
+	  master clock mode.
+	  It supports Bulverde SSP core.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47..83f06d0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_FSL_ESPI)		+= spi-fsl-espi.o
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= spi-intel-mid-ssp.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
diff --git a/drivers/spi/spi-intel-mid-ssp.c b/drivers/spi/spi-intel-mid-ssp.c
new file mode 100644
index 0000000..b3f6197
--- /dev/null
+++ b/drivers/spi/spi-intel-mid-ssp.c
@@ -0,0 +1,1428 @@
+/*
+ * spi-intel-mid-ssp.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/module.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-intel-mid-ssp.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pci_ids[];
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+	int tlen1 = (len < sz ? len : sz);
+	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+	unsigned char *p;
+	static char msg[MAX_SPI_TRANSFER_SIZE];
+
+	memset(msg, '\0', sizeof(msg));
+	p = buf;
+	while (p < buf + tlen1)
+		sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+
+	if (tlen2 > 0) {
+		sprintf(msg, "%s .....", msg);
+		p = (buf+len) - tlen2;
+		while (p < buf + len)
+			sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
+	}
+
+	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+		   len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	u32 sssr;
+	sssr = read_SSSR(drv_context->ioaddr);
+	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+		return 0;
+	else
+		return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u32 i = 0;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (!is_tx_fifo_empty(drv_context)) {
+		dev_err(&drv_context->pdev->dev,
+				"TX FIFO not empty. Reset of SPI IF");
+		disable_interface(drv_context);
+		return;
+	}
+
+	dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+	while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
+		read_SSDR(reg);
+		i++;
+	}
+	WARN(i > 0, "%d words flush occurred\n", i);
+
+	return;
+}
+
+static int null_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(0, reg);
+	drv_context->tx += n_bytes;
+
+	return 1;
+}
+
+static int null_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		read_SSDR(reg);
+		drv_context->rx += n_bytes;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u8_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u8 *)(drv_context->tx), reg);
+	++drv_context->tx;
+
+	return 1;
+}
+
+static int u8_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u8 *)(drv_context->rx) = read_SSDR(reg);
+		++drv_context->rx;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u16_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u16 *)(drv_context->tx), reg);
+	drv_context->tx += 2;
+
+	return 1;
+}
+
+static int u16_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u16 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 2;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static int u32_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u32 *)(drv_context->tx), reg);
+	drv_context->tx += 4;
+
+	return 1;
+}
+
+static int u32_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u32 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 4;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct ssp_driver_context *drv_context =
+		(struct ssp_driver_context *)param;
+	bool ret = false;
+
+	if (!drv_context->dmac1)
+		return ret;
+
+	if (chan->device->dev == &drv_context->dmac1->dev)
+		ret = true;
+
+	return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_context:	Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (!drv_context->dma_mapped)
+		return;
+	dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
+		PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
+		PCI_DMA_TODEVICE);
+	drv_context->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:	Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+	struct callback_param *cb_param = (struct callback_param *)arg;
+	struct ssp_driver_context *drv_context = cb_param->drv_context;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (cb_param->direction == TX_DIRECTION)
+		drv_context->txdma_done = 1;
+	else
+		drv_context->rxdma_done = 1;
+
+	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+		cb_param->direction, drv_context->rxdma_done,
+		drv_context->txdma_done);
+
+	if (drv_context->txdma_done && drv_context->rxdma_done) {
+		/* Clear Status Register */
+		write_SSSR(drv_context->clear_sr, reg);
+		dev_dbg(dev, "DMA done\n");
+		/* Disable Triggers to DMA or to CPU*/
+		disable_triggers(drv_context);
+		unmap_dma_buffers(drv_context);
+
+		queue_work(drv_context->dma_wq, &drv_context->complete_work);
+	}
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	struct dma_slave_config *ds;
+	dma_cap_mask_t mask;
+	struct device *dev = &drv_context->pdev->dev;
+	unsigned int device_id;
+
+	/* Configure RX channel parameters */
+	rxs = &drv_context->dmas_rx;
+	ds = &rxs->dma_slave;
+
+	ds->direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->src_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Configure TX channel parameters */
+	txs = &drv_context->dmas_tx;
+	ds = &txs->dma_slave;
+
+	ds->direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->dst_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Nothing more to do if already initialized */
+	if (drv_context->dma_initialized)
+		return;
+
+	/* Use DMAC1 */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
+		device_id = PCI_MRST_DMAC1_ID;
+	else
+		device_id = PCI_MDFL_DMAC1_ID;
+
+	drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+							device_id, NULL);
+
+	if (!drv_context->dmac1) {
+		dev_err(dev, "Can't find DMAC1");
+		return;
+	}
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+		drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+				2 * MAX_SPI_TRANSFER_SIZE);
+		if (drv_context->virt_addr_sram_rx)
+			drv_context->virt_addr_sram_tx =
+				drv_context->virt_addr_sram_rx +
+				MAX_SPI_TRANSFER_SIZE;
+		else
+			dev_err(dev, "Virt_addr_sram_rx is null\n");
+	}
+
+	/* 1. Allocate rx channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	drv_context->rxchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+	if (!drv_context->rxchan)
+		goto err_exit;
+
+	drv_context->rxchan->private = rxs;
+
+	/* 2. Allocate tx channel */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	drv_context->txchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+
+	if (!drv_context->txchan)
+		goto free_rxchan;
+	else
+		drv_context->txchan->private = txs;
+
+	/* set the dma done bit to 1 */
+	drv_context->txdma_done = 1;
+	drv_context->rxdma_done = 1;
+
+	drv_context->tx_param.drv_context  = drv_context;
+	drv_context->tx_param.direction = TX_DIRECTION;
+	drv_context->rx_param.drv_context  = drv_context;
+	drv_context->rx_param.direction = RX_DIRECTION;
+
+	drv_context->dma_initialized = 1;
+
+	return;
+
+free_rxchan:
+	dma_release_channel(drv_context->rxchan);
+err_exit:
+	dev_err(dev, "Error : DMA Channel Not available\n");
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+	return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_context:	Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
+{
+	dma_release_channel(drv_context->txchan);
+	dma_release_channel(drv_context->rxchan);
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @drv_context:	Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_driver_context *drv_context)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &drv_context->pdev->dev;
+
+	/* get Data Read/Write address */
+	ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
+
+	if (drv_context->tx_dma)
+		drv_context->txdma_done = 0;
+
+	if (drv_context->rx_dma)
+		drv_context->rxdma_done = 0;
+
+	/* 2. prepare the RX dma transfer */
+	txchan = drv_context->txchan;
+	rxchan = drv_context->rxchan;
+
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* Since the DMA is configured to do 32bits access */
+		/* to/from the DDR, the DMA transfer size must be  */
+		/* a multiple of 4 bytes                           */
+		drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
+		drv_context->len_dma_tx = drv_context->len_dma_rx;
+
+		/* In Rx direction, TRAIL Bytes are handled by memcpy */
+		if (drv_context->rx_dma &&
+			(drv_context->len_dma_rx >
+			drv_context->rx_fifo_threshold * drv_context->n_bytes))
+			drv_context->len_dma_rx =
+					TRUNCATE(drv_context->len_dma_rx,
+					drv_context->rx_fifo_threshold *
+					drv_context->n_bytes);
+		else if (!drv_context->rx_dma)
+			dev_err(dev, "ERROR : rx_dma is null\r\n");
+	} else {
+		/* TRAIL Bytes are handled by DMA */
+		if (drv_context->rx_dma) {
+			drv_context->len_dma_rx = drv_context->len;
+			drv_context->len_dma_tx = drv_context->len;
+		} else {
+			dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
+		}
+	}
+
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,				/* DMA Channel */
+		drv_context->rx_dma,			/* DAR */
+		ssdr_addr,				/* SAR */
+		drv_context->len_dma_rx,		/* Data Length */
+		flag);					/* Flag */
+
+	if (rxdesc) {
+		rxdesc->callback = intel_mid_ssp_spi_dma_done;
+		rxdesc->callback_param = &drv_context->rx_param;
+	} else {
+		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%zd)\n",
+			drv_context->len_dma_rx);
+		drv_context->rxdma_done = 1;
+	}
+
+	/* 3. prepare the TX dma transfer */
+	if (drv_context->tx_dma) {
+		txdesc = txchan->device->device_prep_dma_memcpy
+		(txchan,				/* DMA Channel */
+		ssdr_addr,				/* DAR */
+		drv_context->tx_dma,			/* SAR */
+		drv_context->len_dma_tx,		/* Data Length */
+		flag);					/* Flag */
+		if (txdesc) {
+			txdesc->callback = intel_mid_ssp_spi_dma_done;
+			txdesc->callback_param = &drv_context->tx_param;
+		} else {
+			dev_dbg(dev, "txdesc is null! (len_dma_tx:%zd)\n",
+				drv_context->len_dma_tx);
+			drv_context->txdma_done = 1;
+		}
+	} else {
+		dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
+		return;
+	}
+
+	dev_info(dev, "DMA transfer len:%zd len_dma_tx:%zd len_dma_rx:%zd\n",
+		drv_context->len, drv_context->len_dma_tx,
+		drv_context->len_dma_rx);
+
+	if (rxdesc || txdesc) {
+		if (rxdesc) {
+			dev_dbg(dev, "Firing DMA RX channel\n");
+			rxdesc->tx_submit(rxdesc);
+		}
+		if (txdesc) {
+			dev_dbg(dev, "Firing DMA TX channel\n");
+			txdesc->tx_submit(txdesc);
+		}
+	} else {
+		struct callback_param cb_param;
+		cb_param.drv_context = drv_context;
+		dev_dbg(dev, "Bypassing DMA transfer\n");
+		intel_mid_ssp_spi_dma_done(&cb_param);
+	}
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @drv_context:	Pointer to the private driver context
+ */
+static int map_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (unlikely(drv_context->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped\n");
+		return 0;
+	}
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+		/* Copy drv_context->tx into sram_tx */
+		memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
+			drv_context->len);
+#ifdef DUMP_RX
+		dump_trailer(&drv_context->pdev->dev, drv_context->tx,
+			drv_context->len, 16);
+#endif
+		drv_context->rx_dma = SRAM_RX_ADDR;
+		drv_context->tx_dma = SRAM_TX_ADDR;
+	} else {
+		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
+		if (unlikely(drv_context->dma_mapped))
+			return 1;
+
+		drv_context->tx_dma =
+			dma_map_single(dev, drv_context->tx, drv_context->len,
+				PCI_DMA_TODEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
+			dev_err(dev, "ERROR : tx dma mapping failed\n");
+			return 0;
+		}
+
+		drv_context->rx_dma =
+			dma_map_single(dev, drv_context->rx, drv_context->len,
+				PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
+			dma_unmap_single(dev, drv_context->tx_dma,
+				drv_context->len, DMA_TO_DEVICE);
+			dev_err(dev, "ERROR : rx dma mapping failed\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (drv_context->len != drv_context->len_dma_rx) {
+		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+			read_SSSR(reg));
+		drv_context->rx += drv_context->len_dma_rx;
+		drv_context->tx += drv_context->len_dma_tx;
+
+		while ((drv_context->tx != drv_context->tx_end) ||
+			(drv_context->rx != drv_context->rx_end)) {
+			drv_context->read(drv_context);
+			drv_context->write(drv_context);
+		}
+	}
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SRAM to DDR
+ * @drv_context:	Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
+{
+	u32 length = drv_context->len;
+
+	if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+		&& (drv_context->len > drv_context->rx_fifo_threshold *
+		drv_context->n_bytes))
+		length = TRUNCATE(drv_context->len,
+			drv_context->rx_fifo_threshold * drv_context->n_bytes);
+
+	memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	struct spi_message *msg;
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+		pm_qos_update_request(&drv_context->pm_qos_req,
+					PM_QOS_DEFAULT_VALUE);
+
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+		sram_to_ddr_cpy(drv_context);
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+		drain_trail(drv_context);
+	else
+		/* Stop getting Time Outs */
+		write_SSTO(0, reg);
+
+	drv_context->cur_msg->status = 0;
+	drv_context->cur_msg->actual_length = drv_context->len;
+
+#ifdef DUMP_RX
+	dump_trailer(dev, drv_context->rx, drv_context->len, 16);
+#endif
+
+	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+	msg = drv_context->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+	struct ssp_driver_context *drv_context = container_of(work,
+				struct ssp_driver_context, complete_work);
+
+	int_transfer_complete(drv_context);
+}
+
+static void poll_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	struct spi_message *msg;
+
+	/* Update total bytes transferred; return count of actual bytes read */
+	drv_context->cur_msg->actual_length +=
+		drv_context->len - (drv_context->rx_end - drv_context->rx);
+
+	drv_context->cur_msg->status = 0;
+
+	msg = drv_context->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq:	The interrupt number
+ * @dev_id:	Pointer to the private driver context
+ *
+ * The SSP interrupt is not used for transfers, which are handled by
+ * DMA or polling: only under/over run conditions are caught to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct ssp_driver_context *drv_context = dev_id;
+	void *reg = drv_context->ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	u32 status = read_SSSR(reg);
+
+	/* It should never be our interrupt since the SSP will */
+	/* only trigger an interrupt for under/over run.        */
+	if (likely(!(status & drv_context->mask_sr)))
+		return IRQ_NONE;
+
+	if (status & SSSR_ROR || status & SSSR_TUR) {
+		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
+		WARN_ON(1);
+		if (status & SSSR_ROR)
+			dev_err(dev, "we have Overrun\n");
+		if (status & SSSR_TUR)
+			dev_err(dev, "we have Underrun\n");
+	}
+
+	/* We can fall here when not using DMA mode */
+	if (!drv_context->cur_msg) {
+		disable_interface(drv_context);
+		disable_triggers(drv_context);
+	}
+	/* clear status register */
+	write_SSSR(drv_context->clear_sr, reg);
+	return IRQ_HANDLED;
+}
+
+static void poll_transfer(unsigned long data)
+{
+	struct ssp_driver_context *drv_context =
+		(struct ssp_driver_context *)data;
+
+	if (drv_context->tx)
+		while (drv_context->tx != drv_context->tx_end) {
+#ifdef CONFIG_X86_MRFLD
+			/* [REVERT ME] Tangier simulator requires a delay */
+			if (intel_mrfl_identify_sim() ==
+				INTEL_MRFL_CPU_SIMULATION_VP)
+				udelay(10);
+#endif /* CONFIG_X86_MRFLD */
+			drv_context->write(drv_context);
+			drv_context->read(drv_context);
+		}
+
+	while (!drv_context->read(drv_context))
+		cpu_relax();
+
+	poll_transfer_complete(drv_context);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @drv_context:	Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_driver_context *drv_context)
+{
+	u32 sssr;
+	u32 count = 0;
+	u32 cr0;
+	void *i2c_reg = drv_context->I2C_ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+	struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
+	cr0 = chip->cr0;
+
+	dev_warn(dev, "In %s : Starting bit banging\n",\
+		__func__);
+	if (read_SSSR(reg) & SSP_NOT_SYNC)
+		dev_warn(dev, "SSP clock desynchronized.\n");
+	if (!(read_SSCR0(reg) & SSCR0_SSE))
+		dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+	dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+	write_SSPSP(0x02010007, reg);
+
+	write_SSTO(chip->timeout, reg);
+	write_SSCR0(cr0, reg);
+
+	/*
+	*  This routine uses the DFx block to override the SSP inputs
+	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+	*  we have to generate the clock to clear busy.
+	*/
+	write_I2CDATA(0x3, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070034, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CDATA(0x00000099, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	sssr = read_SSSR(reg);
+
+	/* Bit bang the clock until CSS clears */
+	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+		write_I2CDATA(0x2, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CDATA(0x3, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		sssr = read_SSSR(reg);
+		count++;
+	}
+	if (count >= MAX_BITBANGING_LOOP)
+		dev_err(dev,
+			"ERROR in %s : infinite loop on bit banging. Aborting\n",
+			__func__);
+
+	dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+	write_I2CDATA(0x0, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+static unsigned int ssp_get_clk_div(int speed)
+{
+	return max(100000000 / speed, 4) - 1;
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:	Pointer to the spi_device struct
+ * @msg:	Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = drv_context->ioaddr;
+	u32 cr1;
+	struct device *dev = &drv_context->pdev->dev;
+	chip = spi_get_ctldata(msg->spi);
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	drv_context->cur_msg = msg;
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next,
+					struct spi_transfer,
+					transfer_list);
+
+	/* Check transfer length */
+	if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+		(transfer->len == 0))) {
+		dev_warn(dev, "transfer length null or greater than %d\n",
+			MAX_SPI_TRANSFER_SIZE);
+		dev_warn(dev, "length = %d\n", transfer->len);
+		msg->status = -EINVAL;
+
+		if (msg->complete)
+			msg->complete(msg->context);
+
+		return 0;
+	}
+
+	/* Flush any remaining data (in case of failed previous transfer) */
+	flush(drv_context);
+
+	drv_context->tx  = (void *)transfer->tx_buf;
+	drv_context->rx  = (void *)transfer->rx_buf;
+	drv_context->len = transfer->len;
+	drv_context->write = chip->write;
+	drv_context->read = chip->read;
+
+	if (likely(chip->dma_enabled)) {
+		drv_context->dma_mapped = map_dma_buffers(drv_context);
+		if (unlikely(!drv_context->dma_mapped))
+			return 0;
+	} else {
+		drv_context->write = drv_context->tx ?
+			chip->write : null_writer;
+		drv_context->read  = drv_context->rx ?
+			chip->read : null_reader;
+	}
+	drv_context->tx_end = drv_context->tx + transfer->len;
+	drv_context->rx_end = drv_context->rx + transfer->len;
+
+/* [REVERT ME] Bug in status register clear for Tangier simulation */
+#ifdef CONFIG_X86_MRFLD
+	if (intel_mrfl_identify_sim() != INTEL_MRFL_CPU_SIMULATION_VP)
+		write_SSSR(drv_context->clear_sr, reg);
+#else
+	/* Clear status  */
+	write_SSSR(drv_context->clear_sr, reg);
+#endif /* CONFIG_X86_MRFLD */
+
+	/* setup the CR1 control register */
+	cr1 = chip->cr1 | drv_context->cr1_sig;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* in case of len smaller than burst size, adjust the RX     */
+		/* threshold. All other cases will use the default threshold */
+		/* value. The RX fifo threshold must be aligned with the DMA */
+		/* RX transfer size, which may be limited to a multiple of 4 */
+		/* bytes due to 32bits DDR access.                           */
+		if  (drv_context->len / drv_context->n_bytes <=
+			drv_context->rx_fifo_threshold) {
+			u32 rx_fifo_threshold;
+
+			rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
+				drv_context->n_bytes;
+			cr1 &= ~(SSCR1_RFT);
+			cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
+					& SSCR1_RFT;
+		} else {
+			write_SSTO(chip->timeout, reg);
+		}
+	}
+
+	dev_dbg(dev,
+		"transfer len:%zd  n_bytes:%d  cr0:%x  cr1:%x",
+		drv_context->len, drv_context->n_bytes, chip->cr0, cr1);
+
+	/* first set CR1 */
+	write_SSCR1(cr1, reg);
+
+	/* Do bitbanging only if SSP not-enabled or not-synchronized */
+	if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+		(!(read_SSCR0(reg) & SSCR0_SSE))) &&
+		(drv_context->quirks & QUIRKS_BIT_BANGING))) {
+			start_bitbanging(drv_context);
+	} else {
+		/* (re)start the SSP */
+		write_SSCR0(chip->cr0, reg);
+	}
+
+	if (likely(chip->dma_enabled)) {
+		if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+			pm_qos_update_request(&drv_context->pm_qos_req,
+				MIN_EXIT_LATENCY);
+		dma_transfer(drv_context);
+	} else {
+		tasklet_schedule(&drv_context->poll_transfer);
+	}
+
+	return 0;
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	u32 tx_fifo_threshold;
+	u32 burst_size;
+	u32 clk_div;
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+	if ((spi->bits_per_word < MIN_BITS_PER_WORD
+		|| spi->bits_per_word > MAX_BITS_PER_WORD))
+		return -EINVAL;
+
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+			"failed setup: can't allocate chip data\n");
+			return -ENOMEM;
+		}
+	}
+	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+		spi->bits_per_word - 16 : spi->bits_per_word)
+			| SSCR0_SSE
+			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+	/* protocol drivers may change the chip settings, so...  */
+	/* if chip_info exists, use it                           */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	chip->cr1 = 0;
+	if (chip_info) {
+		burst_size = chip_info->burst_size;
+		if (burst_size > IMSS_FIFO_BURST_8)
+			burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->timeout = chip_info->timeout;
+
+		if (chip_info->enable_loopback)
+			chip->cr1 |= SSCR1_LBM;
+
+		chip->dma_enabled = chip_info->dma_enabled;
+
+	} else {
+		/* if no chip_info provided by protocol driver, */
+		/* set default values                           */
+		dev_info(&spi->dev, "setting default chip values\n");
+
+		burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->dma_enabled = 1;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			chip->timeout = 0;
+		else
+			chip->timeout = DFLT_TIMEOUT_VAL;
+	}
+	/* Set FIFO thresholds according to burst_size */
+	if (burst_size == IMSS_FIFO_BURST_8)
+		drv_context->rx_fifo_threshold = 8;
+	else if (burst_size == IMSS_FIFO_BURST_4)
+		drv_context->rx_fifo_threshold = 4;
+	else
+		drv_context->rx_fifo_threshold = 1;
+	tx_fifo_threshold = SPI_FIFO_SIZE - drv_context->rx_fifo_threshold;
+	chip->cr1 |= (SSCR1_RxTresh(drv_context->rx_fifo_threshold) &
+		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) &
+		SSCR1_TFT);
+
+	drv_context->dma_mapped = 0;
+
+	/* setting phase and polarity. spi->mode comes from boardinfo */
+	if ((spi->mode & SPI_CPHA) != 0)
+		chip->cr1 |= SSCR1_SPH;
+	if ((spi->mode & SPI_CPOL) != 0)
+		chip->cr1 |= SSCR1_SPO;
+
+	if (drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+		/* set slave mode */
+		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
+
+	dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
+		spi->bits_per_word,
+		spi->mode & 0x3);
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		chip->cr0 |= SSCR0_EDSS;
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		dev_err(&spi->dev, "invalid wordsize\n");
+		return -EINVAL;
+	}
+
+	if ((drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+		chip->speed_hz = spi->max_speed_hz;
+		clk_div = ssp_get_clk_div(chip->speed_hz);
+		chip->cr0 |= clk_div << 8;
+	}
+	chip->bits_per_word = spi->bits_per_word;
+
+	spi_set_ctldata(spi, chip);
+
+	/* setup of drv_context members that will not change across transfers */
+	drv_context->n_bytes = chip->n_bytes;
+
+	if (chip->dma_enabled) {
+		intel_mid_ssp_spi_dma_init(drv_context);
+		drv_context->cr1_sig  = SSCR1_TSRE | SSCR1_RSRE;
+		drv_context->mask_sr  = SSSR_ROR | SSSR_TUR;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			drv_context->cr1_sig  |= SSCR1_TRAIL;
+	} else {
+		drv_context->cr1_sig = SSCR1_RIE | SSCR1_TIE | SSCR1_TINTE;
+		drv_context->mask_sr = SSSR_RFS | SSSR_TFS |
+				 SSSR_ROR | SSSR_TUR | SSSR_TINT;
+	}
+	drv_context->clear_sr = SSSR_TUR  | SSSR_ROR | SSSR_TINT;
+
+	return 0;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+
+	if (drv_context->dma_initialized)
+		intel_mid_ssp_spi_dma_exit(drv_context);
+
+	/* Remove the PM_QOS request */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_remove_request(&drv_context->pm_qos_req);
+
+	kfree(chip);
+	spi_set_ctldata(spi, NULL);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @ent:	Pointer to the pci_device_id struct
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct ssp_driver_context *drv_context = 0;
+	int status;
+	u32 iolen = 0;
+	u8 ssp_cfg;
+	int pos;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+
+	/* Check if the SSP we are probed for has been allocated */
+	/* to operate as SPI. This information is retrieved from */
+	/* the field adid of the Vendor-Specific PCI capability  */
+	/* which is used as a configuration register.            */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability\n");
+		goto err_abort_probe;
+	}
+
+/* [REVERT ME] SPI mode bit not configured correctly in tangier simulation */
+#ifdef CONFIG_X86_MRFLD
+	if (intel_mrfl_identify_sim() == INTEL_MRFL_CPU_SIMULATION_VP &&
+		(PCI_FUNC(pdev->devfn) == 1)) {
+		/* override */
+		ssp_cfg |= SSP_CFG_SPI_MODE_ID;
+	}
+#endif /* CONFIG_X86_MRFLD */
+
+	if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)\n",
+			ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(dev, "found PCI SSP controller(ID: %04xh:%04xh cfg: %02xh)\n",
+		pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate the SPI master with space for drv_context */
+	master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_slave\n");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	drv_context = spi_master_get_devdata(master);
+	drv_context->master = master;
+
+	drv_context->pdev = pdev;
+	drv_context->quirks = ent->driver_data;
+
+	/* Set platform & configuration quirks */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
+		/* Apply bit banging workaround on MRST */
+		drv_context->quirks |= QUIRKS_BIT_BANGING;
+		/* MRST slave mode workarounds */
+		if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+			drv_context->quirks |=
+				QUIRKS_USE_PM_QOS |
+				QUIRKS_SRAM_ADDITIONAL_CPY;
+	}
+	drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+	if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+		drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
+	master->num_chipselect = 1;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
+	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
+
+	drv_context->dma_initialized = 0;
+
+	/* get basic io resource and map it */
+	drv_context->paddr = pci_resource_start(pdev, 0);
+	iolen = pci_resource_len(pdev, 0);
+
+	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+	if (status)
+		goto err_free_1;
+
+	drv_context->ioaddr =
+		ioremap_nocache(drv_context->paddr, iolen);
+	if (!drv_context->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", drv_context->paddr);
+	dev_dbg(dev, "ioaddr = : %p\n", drv_context->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+	dev_dbg(dev, "quirks = : %08lx\n", drv_context->quirks);
+
+	if (drv_context->quirks & QUIRKS_BIT_BANGING) {
+		/* Bit banging on the clock is done through */
+		/* DFT which is available through I2C.      */
+		/* get base address of I2C_Serbus registers */
+		drv_context->I2C_paddr = 0xff12b000;
+		drv_context->I2C_ioaddr =
+			ioremap_nocache(drv_context->I2C_paddr, 0x10);
+		if (!drv_context->I2C_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_3;
+		}
+	}
+
+	/* Attach to IRQ */
+	drv_context->irq = pdev->irq;
+	status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", drv_context);
+	if (status < 0) {
+		dev_err(&pdev->dev, "can not get IRQ\n");
+		goto err_free_4;
+	}
+
+	if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
+		/* get base address of DMA selector. */
+		syscfg = drv_context->paddr - SYSCFG;
+		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+		if (!syscfg_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_5;
+		}
+		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	}
+
+	tasklet_init(&drv_context->poll_transfer, poll_transfer,
+		(unsigned long)drv_context);
+
+	/* Register with the SPI framework */
+	dev_info(dev, "register with SPI framework (bus spi%d)\n",
+		master->bus_num);
+
+	status = spi_register_master(master);
+
+	if (status != 0) {
+		dev_err(dev, "problem registering spi\n");
+		goto err_free_5;
+	}
+
+	pci_set_drvdata(pdev, drv_context);
+
+	/* Create the PM_QOS request */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_add_request(&drv_context->pm_qos_req,
+		PM_QOS_CPU_DMA_LATENCY,
+		PM_QOS_DEFAULT_VALUE);
+
+	return status;
+
+err_free_5:
+	free_irq(drv_context->irq, drv_context);
+err_free_4:
+	iounmap(drv_context->I2C_ioaddr);
+err_free_3:
+	iounmap(drv_context->ioaddr);
+err_free_2:
+	pci_release_region(pdev, 0);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+
+	if (!drv_context)
+		return;
+
+	/* Release IRQ */
+	free_irq(drv_context->irq, drv_context);
+
+	iounmap(drv_context->ioaddr);
+	if (drv_context->quirks & QUIRKS_BIT_BANGING)
+		iounmap(drv_context->I2C_ioaddr);
+
+	/* disconnect from the SPI framework */
+	spi_unregister_master(drv_context->master);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * intel_mid_ssp_spi_suspend() - Driver suspend procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @state:	pm_message_t
+ */
+static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "suspend\n");
+
+	tasklet_disable(&drv_context->poll_transfer);
+
+	return 0;
+}
+
+/**
+ * intel_mid_ssp_spi_resume() - Driver resume procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "resume\n");
+
+	tasklet_enable(&drv_context->poll_transfer);
+
+	return 0;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#endif /* CONFIG_PM */
+
+
+static const struct pci_device_id pci_ids[] __devinitconst = {
+	/* MRST SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
+	/* MDFL SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP1 */
+	{ PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP3 */
+	{ PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
+	/* MRFL SSP5 */
+	{ PCI_VDEVICE(INTEL, 0x1194), 0},
+	{},
+};
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	__devexit_p(intel_mid_ssp_spi_remove),
+	.suspend =	intel_mid_ssp_spi_suspend,
+	.resume =	intel_mid_ssp_spi_resume,
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
+
diff --git a/include/linux/spi/spi-intel-mid-ssp.h b/include/linux/spi/spi-intel-mid-ssp.h
new file mode 100644
index 0000000..1b90b75
--- /dev/null
+++ b/include/linux/spi/spi-intel-mid-ssp.h
@@ -0,0 +1,326 @@
+/*
+ *  Copyright (C) Intel 2009
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *  Sylvain Centelles <sylvain.centelles-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+
+#define PCI_MRST_DMAC1_ID	0x0814
+#define PCI_MDFL_DMAC1_ID	0x0827
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP   10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignment configuration from PCI config */
+#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
+#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
+#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* Driver's quirk flags */
+/* This workaround buffers data in the audio fabric SRAM from       */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode.                                                  */
+#define QUIRKS_SRAM_ADDITIONAL_CPY	1
+/* If set, the trailing bytes won't be handled by the DMA.          */
+/* Trailing byte feature not fully available.                       */
+#define QUIRKS_DMA_USE_NO_TRAIL		2
+/* If set, the driver will use PM_QOS to reduce the latency         */
+/* introduced by the deeper C-states which may produce over/under   */
+/* run issues. Must be used in slave mode. In master mode, the      */
+/* latency is not critical, but setting this workaround may         */
+/* improve the SPI throughput.                                      */
+#define QUIRKS_USE_PM_QOS		4
+/* This quirk is set on Moorestown                                  */
+#define QUIRKS_PLATFORM_MRST		8
+/* This quirk is set on Medfield                                    */
+#define QUIRKS_PLATFORM_MDFL		16
+/* If set, the driver will apply the bitbanging workaround needed   */
+/* to enable defective Langwell stepping A SSP. The defective SSP   */
+/* can be enabled only once, and should never be disabled.          */
+#define QUIRKS_BIT_BANGING		32
+/* If set, SPI is in slave clock mode                               */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD     4
+#define MAX_BITS_PER_WORD     32
+#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+
+DEFINE_SSP_REG(I2CCTRL, 0x00)
+DEFINE_SSP_REG(I2CDATA, 0x04)
+
+DEFINE_SSP_REG(GPLR1, 0x04)
+DEFINE_SSP_REG(GPDR1, 0x0c)
+DEFINE_SSP_REG(GPSR1, 0x14)
+DEFINE_SSP_REG(GPCR1, 0x1C)
+DEFINE_SSP_REG(GAFR1_U, 0x44)
+
+#define SYSCFG  0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR   SRAM_BASE_ADDR
+#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
+#define SSCR0_NCS   (1 << 21)           /* Network clock select */
+#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrun int mask */
+#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
+#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
+#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
+#define SSSR_BSY		(1 << 4)	/* SSP Busy */
+#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
+#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
+#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
+#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
+
+#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
+#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
+
+#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
+#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interrupt Enable */
+#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE         (1 << 23) /* Bit Count Error */
+#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC         (1 << 20) /* End Of Chain */
+#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+struct callback_param {
+	void *drv_context;
+	u32 direction;
+};
+
+struct ssp_driver_context {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;
+	void *ioaddr;
+	int irq;
+
+	/* I2C registers */
+	dma_addr_t I2C_paddr;
+	void *I2C_ioaddr;
+
+	/* SSP masks*/
+	u32 cr1_sig;
+	u32 cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+	/* PM_QOS request */
+	struct pm_qos_request pm_qos_req;
+
+	struct tasklet_struct poll_transfer;
+
+	spinlock_t lock;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	size_t len;
+	size_t len_dma_rx;
+	size_t len_dma_tx;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	bool dma_initialized;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	u8 n_bytes;
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan    *txchan;
+	struct dma_chan    *rxchan;
+	struct workqueue_struct *dma_wq;
+	struct work_struct complete_work;
+
+	u8 __iomem *virt_addr_sram_tx;
+	u8 __iomem *virt_addr_sram_rx;
+
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+	struct pci_dev *dmac1;
+
+	unsigned long quirks;
+	u32 rx_fifo_threshold;
+};
+
+struct chip_data {
+	u32 cr0;
+	u32 cr1;
+	u32 timeout;
+	u8 n_bytes;
+	u8 dma_enabled;
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+};
+
+
+enum intel_mid_ssp_spi_fifo_burst {
+	IMSS_FIFO_BURST_1,
+	IMSS_FIFO_BURST_4,
+	IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	enum intel_mid_ssp_spi_fifo_burst burst_size;
+	u32 timeout;
+	u8 enable_loopback;
+	u8 dma_enabled;
+};
+
+
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+struct spi_dib_header {
+	u32       signature;
+	u32       length;
+	u8         rev;
+	u8         checksum;
+	u8         dib[0];
+} __packed;
+
+#endif /*INTEL_MID_SSP_SPI_H_*/
-- 
1.7.1
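
For anyone wiring a device to this controller, here is a hypothetical board-file
snippet (the device name, bus number, chip select and clock rate are invented
for illustration) showing how a protocol driver hands the tuning parameters in
struct intel_mid_ssp_spi_chip to this driver through
spi_board_info.controller_data, which setup() later reads back from
spi->controller_data:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-intel-mid-ssp.h>

/* Hypothetical per-device tuning, consumed by this driver's setup() */
static struct intel_mid_ssp_spi_chip example_chip = {
	.burst_size	 = IMSS_FIFO_BURST_8,
	.timeout	 = DFLT_TIMEOUT_VAL,
	.enable_loopback = 0,
	.dma_enabled	 = 1,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	 = "example-protocol-drv",	/* invented */
		.max_speed_hz	 = 25000000,
		.bus_num	 = 0,	/* must match SSP_CFG_GET_SPI_BUS_NB() */
		.chip_select	 = 0,
		.mode		 = SPI_MODE_0,
		.controller_data = &example_chip,
	},
};

static int __init example_register_spi_devices(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
arch_initcall(example_register_spi_devices);

spi_register_board_info() copies the table, so the __initdata annotation on the
table is safe; the controller_data pointer itself must stay valid, which is why
example_chip is not marked __initdata.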





^ permalink raw reply related	[flat|nested] 26+ messages in thread

end of thread, other threads:[~2013-01-10 11:52 UTC | newest]

Thread overview: 26+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-11-21  2:16 [PATCH] SPI: SSP SPI Controller driver chao bi
2012-11-21 12:08 ` Shubhrajyoti Datta
     [not found]   ` <CAM=Q2cvoEMScnCmfrhoAueZ8bfPCX90TxZmsSigfeRbGeXbzMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2012-11-22  3:26     ` Bi, Chao
     [not found]       ` <253F3AA5ECB4EC43A2CA0147545F67F2102B5D40-0J0gbvR4kTiiAffOGbnezLfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2012-11-22  6:54         ` Shubhrajyoti Datta
     [not found]           ` <CAM=Q2cszn_OoTyYiUVSj3NvpxJq+wSUnMJVcwWOdV2EzDviLVw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2012-11-22  8:13             ` Bi, Chao
2012-11-21 12:14 ` Shubhrajyoti Datta
     [not found]   ` <CAM=Q2cu6ReS-6sJxdacnw=FYGdoFed9bM1gA6yFEtmVjs8KQTA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2012-11-21 12:26     ` Alan Cox
     [not found]       ` <20121121122630.13fc2087-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
2012-11-22  7:01         ` Shubhrajyoti Datta
     [not found]           ` <CAM=Q2cuCZni2DyzDux-E5H4-djgNrUURTYJ+f=_oMBeJE7eGMw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2012-11-22 11:04             ` Alan Cox
2012-12-06 12:38 ` Grant Likely
2012-12-06 14:19   ` Alan Cox
     [not found]     ` <20121206141938.0100f06f-Z/y2cZnRghHXmaaqVzeoHQ@public.gmane.org>
2012-12-11 14:30       ` Jun Chen
2012-12-11  2:00   ` chao bi
2012-12-11 16:36     ` Grant Likely
2012-12-11  8:58   ` chao bi
2012-12-11 16:46     ` Grant Likely
2012-12-13  9:09       ` chao bi
2012-12-16 21:32         ` Grant Likely
2012-12-17  8:24           ` chao bi
2012-12-17  8:58     ` Linus Walleij
2012-12-17 11:23 ` Linus Walleij
     [not found]   ` <CACRpkdad3fHxWRpRqD-eP8-sKKexN+s-JZCT6XLggv92Q=5kMA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2012-12-18  5:47     ` chao bi
2012-12-20 15:32       ` Linus Walleij
2013-01-09  4:25       ` Vinod Koul
     [not found]         ` <20130109042535.GL19691-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
2013-01-10 11:52           ` Linus Walleij
  -- strict thread matches above, loose matches on Subject: below --
2012-11-06  9:11 chao bi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).