All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011
       [not found] <[PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011>
@ 2011-02-02 21:01 ` Russ Gorby
  2011-02-12  9:19   ` Grant Likely
  2011-02-02 21:01 ` Russ Gorby
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 13+ messages in thread
From: Russ Gorby @ 2011-02-02 21:01 UTC (permalink / raw)
  To: David Brownell, Grant Likely, open list:SPI SUBSYSTEM, open list

Hello SPI maintainers,
I am sending a patch for the (new) intel_mid_ssp_spi driver for
consideration for inclusion in the Linux Kernel. This is a SPI master
controller driver that is being used for the intel MID platform (Medfield).
It uses the on-board Bulverde SSP controller configured for SPI (spibus #3)
running at 25Mhz.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011
       [not found] <[PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011>
  2011-02-02 21:01 ` [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011 Russ Gorby
@ 2011-02-02 21:01 ` Russ Gorby
  2011-02-02 21:01 ` [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform Russ Gorby
  2011-02-02 21:01 ` Russ Gorby
  3 siblings, 0 replies; 13+ messages in thread
From: Russ Gorby @ 2011-02-02 21:01 UTC (permalink / raw)
  To: David Brownell, Grant Likely,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uuRhgaa4a2kL

Hello SPI maintainers,
I am sending a patch for the (new) intel_mid_ssp_spi driver for
consideration for inclusion in the Linux Kernel. This is a SPI master
controller driver that is being used for the intel MID platform (Medfield).
It uses the on-board Bulverde SSP controller configured for SPI (spibus #3)
running at 25Mhz.

------------------------------------------------------------------------------
Special Offer-- Download ArcSight Logger for FREE (a $49 USD value)!
Finally, a world-class log management solution at an even better price-free!
Download using promo code Free_Logger_4_Dev2Dev. Offer expires 
February 28th, so secure your free ArcSight Logger TODAY! 
http://p.sf.net/sfu/arcsight-sfd2d

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
       [not found] <[PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011>
                   ` (2 preceding siblings ...)
  2011-02-02 21:01 ` [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform Russ Gorby
@ 2011-02-02 21:01 ` Russ Gorby
  2011-02-02 21:03   ` Mark Brown
                     ` (2 more replies)
  3 siblings, 3 replies; 13+ messages in thread
From: Russ Gorby @ 2011-02-02 21:01 UTC (permalink / raw)
  To: David Brownell, Grant Likely, open list:SPI SUBSYSTEM, open list

SPI master controller driver for the Intel MID platform Medfield
This driver uses the Penwell SSP controller and configures it to
be a SPI device (spibus 3). This bus supports a single device -
the 3G SPI modem that can operate up to 25Mhz.

Signed-off-by: Russ Gorby <russ.gorby@intel.com>
---
 drivers/spi/Kconfig                 |    7 +
 drivers/spi/Makefile                |    1 +
 drivers/spi/intel_mid_ssp_spi.c     | 1507 +++++++++++++++++++++++++++++++++++
 drivers/spi/intel_mid_ssp_spi_def.h |  139 ++++
 4 files changed, 1654 insertions(+), 0 deletions(-)
 create mode 100644 drivers/spi/intel_mid_ssp_spi.c
 create mode 100644 drivers/spi/intel_mid_ssp_spi_def.h

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9..60ba339 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -178,6 +178,13 @@ config SPI_IMX
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel Medfield platform"
+	depends on SPI_MASTER && INTEL_MID_DMAC
+	help
+	  This is the SPI master controller driver for the Intel
+	  Medfield MID platform.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f..c64deb9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_SPI_SH_SCI)		+= spi_sh_sci.o
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi_stmp.o
 obj-$(CONFIG_SPI_NUC900)		+= spi_nuc900.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)         += intel_mid_ssp_spi.o
 
 # special build for s3c24xx spi driver with fiq support
 spi_s3c24xx_hw-y			:= spi_s3c24xx.o
diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
new file mode 100644
index 0000000..19c62bc
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.c
@@ -0,0 +1,1507 @@
+/*
+ *  intel_mid_ssp_spi.c - Penwell SPI master controller driver
+ *  based on pxa2xx.c
+ *
+ *  Copyright (C) Intel 2010
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Russ Gorby <russ.gorby@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos_params.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include "intel_mid_ssp_spi_def.h"
+
+#define DRIVER_NAME		"intel_mid_ssp_spi"
+#define PCI_DMAC_MAXDI		2047	/* max items per DMA descriptor */
+#define PCI_DMAC_ID		0x0827	/* PCI id of the Medfield DMAC1 */
+/* PM QoS define */
+#define MIN_EXIT_LATENCY	20
+
+/* testmode bits select DMA / polled / loopback / interrupt operation */
+#define TESTMODE_COMMON_MASK	0x00ff
+#define TESTMODE_PRIV_MASK	0xff00
+#define TESTMODE_ENABLE_DMA	0x01
+#define TESTMODE_ENABLE_POLL	0x02
+#define TESTMODE_ENABLE_LOOPBACK 0x04
+#define TESTMODE_ENABLE_INTR	0x08
+#define TESTMODE(x)		(testmode & x)
+static unsigned int testmode = (TESTMODE_ENABLE_DMA | TESTMODE_ENABLE_POLL);
+
+module_param(testmode, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(testmode, "supply test mode bits");
+
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Penwell SPI3 Master Controller");
+MODULE_LICENSE("GPL");
+
+#define RX_THRESH_DFLT		8
+#define TX_THRESH_DFLT		8
+#define TIMOUT_DFLT		1000
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define PNWL_SSPSP (SSPSP_FSRT | SSPSP_SFRMWDTH(1) | SSPSP_SFRMP | \
+		    SSPSP_SCMODE(3))
+
+/*
+ * clock divider
+ * 8 bpw
+ * TUR/ROR do not generate interrupt
+ * SPI mode operation
+ * SSP enabled
+ */
+#define PNWL_CR0(clk, bits, spi, chip)	\
+	((SSCR0_SerClkDiv(clk) & SSCR0_SCR) |				\
+	 SSCR0_Motorola |						\
+	 SSCR0_DataSize(bits > 16 ? bits - 16 : bits) |			\
+	 SSCR0_SSE |							\
+	 SSCR0_TIM |							\
+	 SSCR0_RIM |							\
+	 (bits > 16 ? SSCR0_EDSS : 0))
+
+#define PNWL_CR1_MASTER_ROLE	0
+#define PNWL_CR1_SLAVE_ROLE	(SSCR1_SFRMDIR | SSCR1_SCLKDIR)
+/* MRST SSP must be slave */
+#define PNWL_CR1_ROLE		PNWL_CR1_MASTER_ROLE
+#define PNWL_CR1(spi, chip)	\
+	  ((chip->enable_loopback ? SSCR1_LBM : 0) | \
+	  ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) | \
+	  ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0) | \
+	  SSCR1_SCFR | \
+	  chip->threshold | \
+	  PNWL_CR1_ROLE)
+
+
+
+/* Context handed to the per-direction DMA completion callback */
+struct callback_param {
+	void *drv_data;		/* owning struct driver_data */
+	int *donep;		/* points at txdma_done or rxdma_done */
+};
+
+/* Power states tracked in driver_data->pwrstate */
+enum dd_pwrstate {
+	PWRSTATE_ON = 1,
+	PWRSTATE_IDLE,
+	PWRSTATE_OFF,
+};
+
+/* Bit numbers used in driver_data->pwrflags */
+enum dd_pwrflags {
+	PWRFLAG_RTRESUMING,	/* a deferred transfer awaits runtime resume */
+};
+
+/* Per-controller private state */
+struct driver_data {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;
+	void __iomem *ioaddr;
+	u32 iolen;
+	int irq;
+
+	/* SSP masks*/
+	u32 dma_cr1;
+	u32 int_cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+
+	/* Current message transfer state info */
+	struct tasklet_struct poll_transfer;
+	struct spi_message *cur_msg;
+	size_t len;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	size_t rx_map_len;
+	size_t tx_map_len;
+	u8 n_bytes;
+	int (*write)(struct driver_data *drv_data);
+	int (*read)(struct driver_data *drv_data);
+	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+	void (*cs_control)(u32 command);
+	struct workqueue_struct *wq;
+	struct work_struct resume_transfer_work;
+
+	/* controller state */
+	int dma_inited;
+
+	/* pwrstate mgmt */
+	int pwrstate;		/* enum dd_pwrstate */
+	unsigned long pwrflags;	/* enum dd_pwrflags */
+
+	/* used by DMA code */
+	struct pci_dev *dmac1;
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan	   *txchan;
+	struct dma_chan	   *rxchan;
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+};
+
+/* Per chip-select configuration computed in setup() */
+struct chip_data {
+	u32 cr0;		/* precomputed SSCR0 value */
+	u32 cr1;		/* precomputed SSCR1 value */
+	u32 psp;
+	u32 timeout;		/* SSTO timeout value */
+	u8 n_bytes;		/* bytes per FIFO entry */
+	u32 threshold;		/* rx/tx FIFO trigger thresholds */
+	u8 enable_dma;		/* use dma if possible */
+	u8 poll_mode;		/* use poll mode */
+	u8 enable_loopback;	/* configure in loopback mode */
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct driver_data *drv_data);
+	int (*read)(struct driver_data *drv_data);
+};
+
+/* forward declaration: message transfer entry point, defined below */
+static int transfer(struct spi_device *, struct spi_message *);
+
+/*
+ * have_fifo_data() - test whether either SSP FIFO still holds data.
+ * @sssrp: if non-NULL, receives the raw SSSR value that was read.
+ *
+ * NOTE(review): the RX-side test ((sssr & SSSR_RFL) != SSSR_RFL) is true
+ * whenever the RX level field is not all-ones, which looks like it could
+ * report "data present" on an empty FIFO - confirm against the SSP spec.
+ */
+static inline int have_fifo_data(struct driver_data *drv_data, u32 *sssrp)
+{
+	u32 sssr;
+	void *reg = drv_data->ioaddr;
+	sssr = ioread32(reg + SSSR);
+
+	if (sssrp)
+		*sssrp = sssr;
+	return ((sssr & SSSR_TFL) || !(sssr & SSSR_TNF)) ||
+		((sssr & SSSR_RFL) != SSSR_RFL || (sssr & SSSR_RNE));
+}
+
+/*
+ * flush() - clear sticky error status before starting a transfer.
+ *
+ * If data is unexpectedly left in the FIFOs the SSP is disabled (SSE
+ * cleared) to reset the interface; otherwise the receiver-overrun and
+ * transmitter-underrun status bits are cleared.
+ */
+static void flush(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u32 sssr;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (have_fifo_data(drv_data, &sssr)) {
+		dev_warn(&drv_data->pdev->dev,
+			 "ERROR: flush: fifos not empty! sssr:%x", sssr);
+		iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
+		return;
+	}
+
+	iowrite32(SSSR_ROR, reg + SSSR);
+	iowrite32(SSSR_TUR, reg + SSSR);
+}
+
+/*
+ * reader/writer functions
+ *
+ * *_reader functions return:
+ *	0: not complete (data not available)
+ *	1: *all* requested data has been read
+ *
+ * *_writer functions return:
+ *	1: data successfully written
+ *	0: *all* requested data already written *or* full condition hit
+ *	note: this means caller must verify write-complete condition
+ *
+ */
+/* Writer used when the caller supplied no tx buffer: clocks out zeros. */
+static int null_writer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u8 n_bytes = drv_data->n_bytes;
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(0, reg + SSDR);
+	drv_data->tx += n_bytes;
+
+	return 1;
+}
+
+/* Reader used when the caller supplied no rx buffer: discards data. */
+static int null_reader(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u8 n_bytes = drv_data->n_bytes;
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE) &&
+	       (drv_data->rx < drv_data->rx_end)) {
+
+		ioread32(reg + SSDR);
+		drv_data->rx += n_bytes;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/* Push one byte to the TX FIFO; 0 when the FIFO is full or tx is done. */
+static int u8_writer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(*(u8 *)(drv_data->tx), reg + SSDR);
+	dev_dbg(&drv_data->pdev->dev, "u8_write: %x", ((u8 *)drv_data->tx)[0]);
+	drv_data->tx++;
+
+	return 1;
+}
+
+/* Drain the RX FIFO one byte at a time; 1 when all requested data read. */
+static int u8_reader(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+
+		*(u8 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx++;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/* Push one 16-bit word to the TX FIFO; 0 when full or tx is done. */
+static int u16_writer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(*(u16 *)(drv_data->tx), reg + SSDR);
+	drv_data->tx += 2;
+
+	return 1;
+}
+
+/* Drain the RX FIFO 16 bits at a time; 1 when all requested data read. */
+static int u16_reader(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	dev_dbg(&drv_data->pdev->dev, "u16_read");
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+
+		*(u16 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx += 2;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/* Push one 32-bit word to the TX FIFO; 0 when full or tx is done. */
+static int u32_writer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	dev_dbg(&drv_data->pdev->dev, "u32_write");
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end)) {
+		return 0;
+	}
+
+	iowrite32(*(u32 *)(drv_data->tx), reg + SSDR);
+	drv_data->tx += 4;
+
+	return 1;
+}
+
+/* Drain the RX FIFO 32 bits at a time; 1 when all requested data read. */
+static int u32_reader(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+
+		*(u32 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx += 4;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+
+/* caller already set message->status; dma and pio irqs are blocked */
+/* Hand the finished message back to the SPI core's completion callback. */
+static void giveback(struct driver_data *drv_data)
+{
+	struct spi_message *msg;
+
+	msg = drv_data->cur_msg;
+	drv_data->cur_msg = NULL;
+	msg->state = NULL;
+	if (msg->complete)
+		msg->complete(msg->context);
+}
+
+/* dma_request_channel() filter: accept only channels provided by the
+ * DMAC1 PCI device this driver located at init time. */
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct driver_data *drv_data = param;
+
+	if (!drv_data->dmac1)
+		return false;
+
+	return chan->device->dev == &drv_data->dmac1->dev;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_data:		Pointer to the private driver data
+ * @msg:		Message the buffers belong to; when the caller mapped
+ *			the buffers itself (is_dma_mapped) nothing is undone.
+ */
+static void unmap_dma_buffers(struct driver_data *drv_data,
+			      struct spi_message *msg)
+{
+	struct device *dev = &drv_data->pdev->dev;
+
+	if (unlikely(!drv_data->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers not mapped");
+		return;
+	}
+	if (unlikely(msg->is_dma_mapped))
+		return;
+
+	dma_unmap_single(dev, drv_data->rx_dma, drv_data->len, DMA_FROM_DEVICE);
+	dma_unmap_single(dev, drv_data->tx_dma, drv_data->len, DMA_TO_DEVICE);
+	drv_data->dma_mapped = 0;
+}
+
+
+/*
+ * dma_transfer_complete() - dmaengine callback for one DMA direction.
+ *
+ * Marks its direction done; once both tx and rx have completed it
+ * quiesces the SSP, unmaps the buffers and completes the message.
+ */
+static void dma_transfer_complete(void *arg)
+{
+	struct callback_param *param = arg;
+	struct driver_data *drv_data = (struct driver_data *)param->drv_data;
+	int *done;
+	void *reg;
+	u32 sscr1;
+
+	done = (int *)param->donep;
+	reg = drv_data->ioaddr;
+	*done = 1;
+
+	/* wait for the other direction to finish */
+	if (!drv_data->txdma_done || !drv_data->rxdma_done)
+		return;
+
+	/* Clear Status Register */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+
+	sscr1 = ioread32(reg + SSCR1);
+
+	/* Disable Triggers to DMA */
+	sscr1 &= ~drv_data->dma_cr1;
+
+	/* Disable Interrupt */
+	sscr1 &= ~drv_data->int_cr1;
+	iowrite32(sscr1, reg + SSCR1);
+
+	/* Stop getting Time Outs */
+	iowrite32(0, reg + SSTO);
+
+	/* release DMA mappings */
+	unmap_dma_buffers(drv_data, drv_data->cur_msg);
+
+	/* Update total bytes transferred; return count actual bytes read */
+	drv_data->cur_msg->actual_length = drv_data->len;
+
+	drv_data->cur_msg->status = 0;
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_data:		Pointer to the private driver data
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct driver_data *drv_data)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	dma_cap_mask_t mask;
+
+	if (drv_data->dma_inited)
+		return;
+
+	drv_data->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DMAC_ID,
+					 NULL);
+	if (!drv_data->dmac1) {
+		dev_warn(&drv_data->pdev->dev, "Can't find DMAC %x",
+			 PCI_DMAC_ID);
+		return;
+	}
+
+	/* 1. init rx channel */
+	rxs = &drv_data->dmas_rx;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	rxs->dma_slave.direction = DMA_FROM_DEVICE;
+	rxs->dma_slave.src_maxburst = LNW_DMA_MSIZE_8;
+	/* NOTE(review): rx uses 4-byte dst / 2-byte src widths while tx is
+	 * the mirror image; confirm against the DMAC and SSP FIFO widths. */
+	rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	rxs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	drv_data->rxchan = dma_request_channel(mask, chan_filter, drv_data);
+	if (!drv_data->rxchan)
+		goto err_exit;
+	drv_data->rxchan->private = rxs;
+
+	/* 2. init tx channel */
+	txs = &drv_data->dmas_tx;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	txs->dma_slave.direction = DMA_TO_DEVICE;
+	txs->dma_slave.dst_maxburst = LNW_DMA_MSIZE_8;
+	txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	txs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	drv_data->txchan = dma_request_channel(mask, chan_filter, drv_data);
+	if (!drv_data->txchan)
+		goto free_rxchan;
+	drv_data->txchan->private = txs;
+
+	/* both directions idle until a transfer is started */
+	drv_data->dma_inited = 1;
+	drv_data->txdma_done = 1;
+	drv_data->rxdma_done = 1;
+
+	drv_data->tx_param.drv_data = (void *)drv_data;
+	drv_data->tx_param.donep = &drv_data->txdma_done;
+	drv_data->rx_param.drv_data = (void *)drv_data;
+	drv_data->rx_param.donep = &drv_data->rxdma_done;
+	return;
+
+free_rxchan:
+	dev_err(&drv_data->pdev->dev, "DMA TX Channel Not available");
+	dma_release_channel(drv_data->rxchan);
+err_exit:
+	dev_err(&drv_data->pdev->dev, "DMA RX Channel Not available");
+	pci_dev_put(drv_data->dmac1);
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_data:		Pointer to the private driver data
+ */
+static void intel_mid_ssp_spi_dma_exit(struct driver_data *drv_data)
+{
+	if (!drv_data->dma_inited)
+		return;
+	dma_release_channel(drv_data->txchan);
+	dma_release_channel(drv_data->rxchan);
+	pci_dev_put(drv_data->dmac1);
+	drv_data->dma_inited = 0;
+}
+
+/**
+ * dma_transfer() - start a DMA transfer for the current message
+ * @drv_data:	Pointer to the private driver data
+ *
+ * Configures the rx and/or tx channels, attaches completion callbacks
+ * and submits the descriptors; dma_transfer_complete() finishes the
+ * message once both directions are done.
+ */
+static void dma_transfer(struct driver_data *drv_data)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct dma_slave_config *txconf, *rxconf;
+	struct device *dev = &drv_data->pdev->dev;
+
+	/* get Data Read/Write address (SSDR is at offset 0x10) */
+	ssdr_addr = (dma_addr_t)(drv_data->paddr + 0x10);
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	/* 1. Prepare the RX dma transfer	- DMA_FROM_DEVICE */
+	if (drv_data->rx_dma) {
+		rxconf = &drv_data->dmas_rx.dma_slave;
+		rxconf->src_addr = drv_data->rx_dma;
+		rxchan = drv_data->rxchan;
+		rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+					       (unsigned long) rxconf);
+		rxdesc = rxchan->device->device_prep_dma_memcpy
+			(rxchan,		/* DMA Channel */
+			 drv_data->rx_dma,	/* DAR */
+			 ssdr_addr,		/* SAR */
+			 drv_data->len,		/* Data Length */
+			 flag);			/* Flag */
+		if (!rxdesc) {
+			dev_err(dev, "ERROR : rxdesc is null!");
+			return;
+		}
+		rxdesc->callback = dma_transfer_complete;
+		rxdesc->callback_param = &drv_data->rx_param;
+		drv_data->rxdma_done = 0;
+	}
+
+	/* 2. Prepare the TX dma transfer	-  DMA_TO_DEVICE */
+	if (drv_data->tx_dma) {
+		txconf = &drv_data->dmas_tx.dma_slave;
+		/* fix: was rx_dma - tx must reference its own buffer */
+		txconf->dst_addr = drv_data->tx_dma;
+		txchan = drv_data->txchan;
+		txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+					       (unsigned long) txconf);
+		txdesc = txchan->device->device_prep_dma_memcpy
+			(txchan,		/* DMA Channel */
+			 ssdr_addr,		/* DAR */
+			 drv_data->tx_dma,	/* SAR */
+			 drv_data->len,		/* Data Length */
+			 flag);			/* Flag */
+		if (!txdesc) {
+			dev_err(dev, "ERROR : txdesc is null!");
+			return;
+		}
+		txdesc->callback = dma_transfer_complete;
+		txdesc->callback_param = &drv_data->tx_param;
+		drv_data->txdma_done = 0;
+	}
+
+	if (rxdesc)
+		rxdesc->tx_submit(rxdesc);
+	if (txdesc)
+		txdesc->tx_submit(txdesc);
+
+}
+
+
+/**
+ * map_dma_buffers() - Map DMA buffers before a transfer
+ * @drv_data:	Pointer to the private driver data
+ * @msg:	Message being transferred (may already be mapped by caller)
+ * @transfer:	The single spi_transfer making up @msg
+ *
+ * Returns 1 when the transfer can proceed by DMA, 0 to fall back to
+ * programmed I/O (length too large or mapping failed).
+ */
+static int map_dma_buffers(struct driver_data *drv_data,
+			   struct spi_message *msg,
+			   struct spi_transfer *transfer)
+{
+	struct device *dev = &drv_data->pdev->dev;
+
+	if (unlikely(drv_data->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped");
+		return 0;
+	}
+	if (unlikely(msg->is_dma_mapped)) {
+		drv_data->rx_dma = transfer->rx_dma;
+		drv_data->tx_dma = transfer->tx_dma;
+		return 1;
+	}
+	if (drv_data->len > PCI_DMAC_MAXDI * drv_data->n_bytes) {
+		/* if length is too long we revert to programmed I/O */
+		return 0;
+	}
+
+	if (likely(drv_data->rx)) {
+		drv_data->rx_dma =
+			dma_map_single(dev, drv_data->rx,
+				       drv_data->len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_data->rx_dma))) {
+			dev_err(dev, "ERROR : rx dma mapping failed");
+			return 0;
+		}
+	}
+	if (likely(drv_data->tx)) {
+		drv_data->tx_dma =
+			dma_map_single(dev, drv_data->tx,
+				       drv_data->len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_data->tx_dma))) {
+			/* fix: only undo the rx mapping if one was made */
+			if (drv_data->rx)
+				dma_unmap_single(dev, drv_data->rx_dma,
+						 drv_data->len,
+						 DMA_FROM_DEVICE);
+			dev_err(dev, "ERROR : tx dma mapping failed");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * set_dma_width() - match DMA bus widths to the transfer word size.
+ * Values of @bits above 32 leave the widths untouched (callers
+ * validate the range beforehand).
+ */
+static void set_dma_width(struct driver_data *drv_data, int bits)
+{
+	struct dma_slave_config *rxconf, *txconf;
+	rxconf = &drv_data->dmas_rx.dma_slave;
+	txconf = &drv_data->dmas_tx.dma_slave;
+
+	if (bits <= 8) {
+		rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else if (bits <= 16) {
+		rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	} else if (bits <= 32) {
+		rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	}
+}
+
+/*
+ * int_error_stop() - stop the SSP after an error interrupt.
+ *
+ * Clears pending status, masks the transfer interrupts, cancels the
+ * receive timeout, flushes the FIFOs and logs @msg.
+ */
+static void int_error_stop(struct driver_data *drv_data, const char *msg)
+{
+	void *reg = drv_data->ioaddr;
+
+	/* Stop and reset SSP */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+	iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1, reg + SSCR1);
+	iowrite32(0, reg + SSTO);
+	flush(drv_data);
+
+	dev_err(&drv_data->pdev->dev, "%s", msg);
+}
+
+/* Finish an interrupt-driven transfer: quiesce the SSP, account for the
+ * bytes actually received, and hand the message back to the SPI core. */
+static void int_transfer_complete(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u32 sscr1;
+
+	dev_dbg(&drv_data->pdev->dev, "interrupt transfer complete");
+	/* Clear Status Register */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+
+	sscr1 = ioread32(reg + SSCR1);
+
+	/* Disable Triggers to DMA */
+	sscr1 &= ~drv_data->dma_cr1;
+
+	/* Disable Interrupt */
+	sscr1 &= ~drv_data->int_cr1;
+	iowrite32(sscr1, reg + SSCR1);
+
+	/* Stop getting Time Outs */
+	iowrite32(0, reg + SSTO);
+
+	/* Update total bytes transferred; return count actual bytes read */
+	drv_data->cur_msg->actual_length += drv_data->len -
+		(drv_data->rx_end - drv_data->rx);
+
+	drv_data->cur_msg->status = 0;
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/* Finish a poll-mode transfer: account for bytes received and complete. */
+static void transfer_complete(struct driver_data *drv_data)
+{
+	/* Update total bytes transferred; return count actual bytes read */
+	drv_data->cur_msg->actual_length +=
+		drv_data->len - (drv_data->rx_end - drv_data->rx);
+
+	drv_data->cur_msg->status = 0;
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/*
+ * interrupt_transfer() - interrupt-mode transfer handler.
+ *
+ * Called from ssp_int() while a message is in flight: handles overrun,
+ * receive timeout, then drains rx while filling tx; masks the TX
+ * interrupt once everything has been queued for transmit.
+ */
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	/* only consider TFS when the TX interrupt is still enabled */
+	u32 irq_mask = (ioread32(reg + SSCR1) & SSCR1_TIE) ?
+		drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+
+	u32 irq_status = ioread32(reg + SSSR) & irq_mask;
+	if (irq_status & SSSR_ROR) {
+		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+		return IRQ_HANDLED;
+	}
+
+	if (irq_status & SSSR_TINT) {
+		iowrite32(SSSR_TINT, reg + SSSR);
+		if (drv_data->read(drv_data)) {
+			int_transfer_complete(drv_data);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* Drain rx fifo, Fill tx fifo and prevent overruns */
+	do {
+		if (drv_data->read(drv_data)) {
+			int_transfer_complete(drv_data);
+			return IRQ_HANDLED;
+		}
+	} while (drv_data->write(drv_data));
+
+	if (drv_data->read(drv_data)) {
+		int_transfer_complete(drv_data);
+		return IRQ_HANDLED;
+	}
+
+	if (drv_data->tx == drv_data->tx_end)
+		iowrite32(ioread32(reg + SSCR1) & ~SSCR1_TIE, reg + SSCR1);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * ssp_int() - top-level SSP interrupt handler.
+ *
+ * Acknowledges ROR/TUR during DMA transfers, rejects interrupts that
+ * are not ours, quiesces the port on a spurious interrupt with no
+ * message in flight, and otherwise dispatches to transfer_handler.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct driver_data *drv_data = dev_id;
+	void *reg = drv_data->ioaddr;
+	u32 status = ioread32(reg + SSSR);
+
+	if (status & (SSSR_ROR | SSSR_TUR)) {
+		/* fix: "Occred" -> "Occurred" in the diagnostic message */
+		dev_dbg(&drv_data->pdev->dev,
+			"--- SPI ROR or TUR Occurred : SSSR=%x", status);
+
+		if (drv_data->dma_mapped) {
+			iowrite32(SSSR_ROR, reg + SSSR);	/* Clear ROR */
+			iowrite32(SSSR_TUR, reg + SSSR);	/* Clear TUR */
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* just return if this is not our interrupt */
+	if (!(ioread32(reg + SSSR) & drv_data->mask_sr))
+		return IRQ_NONE;
+
+	if (!drv_data->cur_msg) {
+		/* spurious: no message in flight - quiesce the port */
+		iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
+		iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1,
+			reg + SSCR1);
+		iowrite32(drv_data->clear_sr, reg + SSSR);
+
+		/* Never fail */
+
+		return IRQ_HANDLED;
+	}
+
+	return drv_data->transfer_handler(drv_data);
+}
+
+/*
+ * poll_transfer() - tasklet body for poll-mode transfers.
+ *
+ * Interleaves writes and reads until all tx data is queued, then spins
+ * draining the remaining rx data before completing the message.
+ */
+static void poll_transfer(unsigned long data)
+{
+	struct driver_data *drv_data = (struct driver_data *)data;
+
+	if (drv_data->tx) {
+		while (drv_data->tx != drv_data->tx_end) {
+			drv_data->write(drv_data);
+			drv_data->read(drv_data);
+		}
+	}
+
+	while (!drv_data->read(drv_data))
+		cpu_relax();
+
+	transfer_complete(drv_data);
+}
+
+/*
+ * ssp_get_clk_div() - compute the SSP serial clock divider for @speed.
+ *
+ * The fabric clock runs at 100 MHz and the SSP clock may not exceed
+ * 25 MHz, so the divider is never allowed below 4.
+ */
+static unsigned int ssp_get_clk_div(int speed)
+{
+	unsigned int div = 100000000 / speed;
+
+	return (div < 4) ? 4 : div;
+}
+
+/**
+ * resume_transfer_work()	- resume from pm_runtime sleep then
+ *		perform transfer() work
+ *
+ * Runs on drv_data->wq for a message that arrived while the controller
+ * was runtime-suspended; re-enters transfer() once resumed.
+ */
+static void resume_transfer_work(struct work_struct *work)
+{
+	struct driver_data *drv_data = container_of(work, struct driver_data,
+						    resume_transfer_work);
+	struct spi_message *msg;
+
+	pm_runtime_get_sync(&drv_data->pdev->dev);
+	WARN_ON(drv_data->pwrstate != PWRSTATE_ON);
+	msg = drv_data->cur_msg;
+	drv_data->cur_msg = NULL;
+	transfer(msg->spi, msg);
+}
+
+/**
+ * transfer() - SPI core entry point: queue/execute one message
+ * @spi:	target SPI slave device
+ * @msg:	message to transfer; only the first spi_transfer is used
+ *		because the protocol module controls out-of-band signaling
+ *
+ * Picks DMA, polled or interrupt-driven I/O, applying per-transfer
+ * speed and bits_per_word overrides. Returns 0 on success or a
+ * negative errno when the controller is off or busy resuming.
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = drv_data->ioaddr;
+	int truth;
+	u8 bits;
+	u32 clk_div;
+	u32 speed;
+	u32 cr0;
+	u32 cr1;
+
+	if (unlikely(drv_data->pwrstate == PWRSTATE_OFF)) {
+		dev_dbg(&drv_data->pdev->dev, "transfer: busy, pwrstate:%d",
+			drv_data->pwrstate);
+		return -EIO;
+	}
+	if (unlikely(drv_data->pwrstate == PWRSTATE_IDLE)) {
+		/* controller suspended: defer to the resume worker */
+		truth = test_and_set_bit(PWRFLAG_RTRESUMING,
+					 &drv_data->pwrflags);
+		if (truth > 0) {
+			WARN_ON(1);
+			return -EBUSY;
+		}
+		drv_data->cur_msg = msg;
+		queue_work(drv_data->wq, &drv_data->resume_transfer_work);
+		return 0;
+	}
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	drv_data->cur_msg = msg;
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next, struct spi_transfer,
+			      transfer_list);
+
+	/* Setup the transfer state based on the type of transfer */
+	if (likely(!test_and_clear_bit(PWRFLAG_RTRESUMING,
+				       &drv_data->pwrflags)))
+		pm_runtime_get(&drv_data->pdev->dev);
+	chip = spi_get_ctldata(msg->spi);
+	flush(drv_data);
+	drv_data->n_bytes = chip->n_bytes;
+	drv_data->tx = (void *)transfer->tx_buf;
+	drv_data->tx_end = drv_data->tx + transfer->len;
+	drv_data->rx = transfer->rx_buf;
+	drv_data->rx_end = drv_data->rx + transfer->len;
+	drv_data->rx_dma = transfer->rx_dma;
+	drv_data->tx_dma = transfer->tx_dma;
+	drv_data->len = transfer->len;
+	drv_data->write = drv_data->tx ? chip->write : null_writer;
+	drv_data->read = drv_data->rx ? chip->read : null_reader;
+
+	/* Change speed and bit per word on a per transfer */
+	cr0 = chip->cr0;
+	if (transfer->speed_hz || transfer->bits_per_word) {
+
+		bits = chip->bits_per_word;
+		speed = chip->speed_hz;
+
+		if (transfer->speed_hz)
+			speed = transfer->speed_hz;
+
+		clk_div = ssp_get_clk_div(speed);
+
+		if (transfer->bits_per_word)
+			bits = transfer->bits_per_word;
+
+		/* keep null_* handlers when no buffer was supplied */
+		if (bits <= 8) {
+			drv_data->n_bytes = 1;
+			drv_data->read = drv_data->read != null_reader ?
+				u8_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u8_writer : null_writer;
+		} else if (bits <= 16) {
+			drv_data->n_bytes = 2;
+			drv_data->read = drv_data->read != null_reader ?
+				u16_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u16_writer : null_writer;
+		} else if (bits <= 32) {
+			drv_data->n_bytes = 4;
+			drv_data->read = drv_data->read != null_reader ?
+				u32_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u32_writer : null_writer;
+		}
+		if (likely(chip->enable_dma))
+			set_dma_width(drv_data, bits);
+		cr0 = PNWL_CR0(clk_div, bits, spi, chip);
+	}
+
+	/* try to map dma buffer and do a dma transfer if successful */
+	if (likely(chip->enable_dma))
+		drv_data->dma_mapped = map_dma_buffers(drv_data, msg, transfer);
+	else {
+		WARN_ON(drv_data->dma_mapped != 0);
+		drv_data->dma_mapped = 0;
+	}
+
+	drv_data->transfer_handler = interrupt_transfer;
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+	cr1 = chip->cr1;
+	iowrite32(chip->timeout, reg + SSTO);
+
+	if (likely(drv_data->dma_mapped))
+		cr1 |= drv_data->dma_cr1;
+	else if (!chip->poll_mode)
+		cr1 |= drv_data->int_cr1;
+
+	dev_dbg(&drv_data->pdev->dev,
+		"%s drv_data:%p len:%d n_bytes:%d cr0:%x cr1:%x",
+		(drv_data->dma_mapped ? "DMA io:" :
+		 (chip->poll_mode ? "Poll io:" : "Intr io:")),
+		drv_data, drv_data->len, drv_data->n_bytes, cr0, cr1);
+
+	/* see if we need to reload the config registers */
+	truth = ioread32(reg + SSCR0) != cr0 ||
+		((ioread32(reg + SSCR1) & SSCR1_CHANGE_MASK) !=
+		 (cr1 & SSCR1_CHANGE_MASK));
+	if (unlikely(truth)) {
+		/* stop the SSP, and update the other bits */
+		iowrite32(cr0 & ~SSCR0_SSE, reg + SSCR0);
+		/* first set CR1 without interrupt and service enables */
+		iowrite32(cr1 & SSCR1_CHANGE_MASK, reg + SSCR1);
+		/* restart the SSP */
+		iowrite32(cr0, reg + SSCR0);
+	}
+
+	/* after chip select, release the data by enabling service
+	 * requests and interrupts, without changing any mode bits */
+	iowrite32(cr1, reg + SSCR1);
+
+	if (likely(drv_data->dma_mapped)) {
+		/* transfer using DMA */
+		dma_transfer(drv_data);
+	} else if (chip->poll_mode) {
+		/* transfer using non interrupt polling */
+		tasklet_schedule(&drv_data->poll_transfer);
+	}
+	/*
+	 * if not using dma or poll-mode, transfers are done
+	 * using interrupt driven programmed I/O
+	 */
+
+	return 0;
+}
+
+/**
+ * setup()	- configures hardware according to the given device
+ * @spi:	SPI slave device whose controller state is being set up
+ *
+ * This function sets up the actual hardware registers with settings given
+ * in @spi and its controller_data
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	uint tx_thres = TX_THRESH_DFLT;
+	uint rx_thres = RX_THRESH_DFLT;
+	u32 clk_div;
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+
+	if (drv_data->pwrstate == PWRSTATE_OFF) {
+		dev_dbg(&drv_data->pdev->dev, "setup: busy, pwrstate:%d",
+			drv_data->pwrstate);
+		return -EIO;
+	}
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	if ((spi->bits_per_word < 4 || spi->bits_per_word > 32))
+		return -EINVAL;
+
+	/* Only alloc on first setup */
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+				"failed setup: can't allocate chip data");
+			return -ENOMEM;
+		}
+
+		chip->timeout = TIMOUT_DFLT;
+	}
+
+	/*
+	 *  protocol drivers may change the chip settings, so...
+	 * if chip_info exists, use it
+	 */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	if (chip_info) {
+		if (chip_info->timeout)
+			chip->timeout = chip_info->timeout;
+
+		if (chip_info->tx_threshold)
+			tx_thres = chip_info->tx_threshold;
+		if (chip_info->rx_threshold)
+			rx_thres = chip_info->rx_threshold;
+	}
+	chip->enable_dma = TESTMODE(TESTMODE_ENABLE_DMA);
+	chip->poll_mode = TESTMODE(TESTMODE_ENABLE_POLL);
+	chip->enable_loopback = TESTMODE(TESTMODE_ENABLE_LOOPBACK);
+
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		dev_err(&spi->dev, "invalid wordsize");
+		return -ENODEV;
+	}
+
+	if (chip->enable_dma) {
+		intel_mid_ssp_spi_dma_init(drv_data);
+		set_dma_width(drv_data, spi->bits_per_word);
+	}
+
+	chip->speed_hz = spi->max_speed_hz;
+	clk_div = ssp_get_clk_div(chip->speed_hz);
+
+	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
+		(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+	chip->bits_per_word = spi->bits_per_word;
+
+	chip->cr0 = PNWL_CR0(clk_div, spi->bits_per_word, spi, chip);
+	chip->cr1 = PNWL_CR1(spi, chip);
+
+	dev_dbg(&spi->dev,
+		"KHz:%d bpw:%d mode:%d dma:%d poll:%d loop:%d cr0:%x cr1:%x",
+		100000 / clk_div, spi->bits_per_word, spi->mode & 0x3,
+		chip->enable_dma, chip->poll_mode, chip->enable_loopback,
+		chip->cr0, chip->cr1);
+
+	spi_set_ctldata(spi, chip);
+
+	return 0;
+}
+
+/**
+ * cleanup() - release controller state associated with @spi
+ * @spi:	SPI device being torn down
+ *
+ * Frees the per-chip data allocated by setup() and shuts down DMA
+ * support if it was initialized.
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	if (drv_data->dma_inited)
+		intel_mid_ssp_spi_dma_exit(drv_data);
+
+	spi_set_ctldata(spi, NULL);
+	kfree(chip);
+}
+
+/*
+ * intel_mid_ssp_spi_probe() - PCI probe: verify the SSP is configured
+ * as SPI master, map its registers, hook the IRQ and register with the
+ * SPI core.  Returns 0 on success or a negative errno.
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct driver_data *drv_data;
+	int status = 0;
+	int pci_bar = 0;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+	u8 ssp_cfg;
+	int pos;
+
+	/* Check if the SSP we are probed for has been allocated to  */
+	/* operate as SPI master. This information is get from the   */
+	/* field adid of the Vendor-Specific PCI capability which is */
+	/* used as a configuration register.                         */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability");
+		goto err_abort_probe;
+	}
+	if ((SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) ||
+		SSP_CFG_IS_SPI_SLAVE(ssp_cfg)) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)",
+			ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(&pdev->dev, "found PCI SSP controller(ID: %04xh:%04xh"
+		" cfg: %02xh)", pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate Slave with space for drv_data and null dma buffer */
+	master = spi_alloc_master(dev, sizeof(struct driver_data));
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_master");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	drv_data = spi_master_get_devdata(master);
+	drv_data->master = master;
+	drv_data->pdev = pdev;
+	drv_data->pwrstate = PWRSTATE_ON;
+	drv_data->wq = create_workqueue(DRIVER_NAME);
+	if (!drv_data->wq) {
+		/* was unchecked: a NULL wq would oops at first queueing */
+		status = -ENOMEM;
+		goto err_put_master;
+	}
+	INIT_WORK(&drv_data->resume_transfer_work, resume_transfer_work);
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
+	master->num_chipselect = 1;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+
+	/* get basic io resource and map it */
+	drv_data->paddr = pci_resource_start(pdev, pci_bar);
+	drv_data->iolen = pci_resource_len(pdev, pci_bar);
+
+	status = pci_request_region(pdev, pci_bar, dev_name(dev));
+	if (status)
+		goto err_free_1;
+
+	drv_data->ioaddr = ioremap_nocache(drv_data->paddr, drv_data->iolen);
+	if (!drv_data->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", drv_data->paddr);
+	dev_dbg(dev, "ioaddr = : %p", drv_data->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x", pdev->irq);
+
+	/* Attach to IRQ */
+	drv_data->irq = pdev->irq;
+	status = request_irq(drv_data->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", drv_data);
+	if (status < 0) {
+		dev_err(dev, "can not get IRQ %d", drv_data->irq);
+		goto err_free_3;
+	}
+
+	/* Get base address of the DMA selector and route this SSP to the
+	 * DMAC.  The mapping is only needed for this one RMW access, so
+	 * it is released right away instead of being leaked. */
+	syscfg = drv_data->paddr - SYSCFG;
+	syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+	if (!syscfg_ioaddr) {
+		status = -ENOMEM;
+		/* IRQ is already requested at this point: free it too
+		 * (the original goto err_free_3 leaked it) */
+		goto err_free_4;
+	}
+	iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	iounmap(syscfg_ioaddr);
+
+	drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+	drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL;
+	drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+	drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+
+	tasklet_init(&drv_data->poll_transfer,
+		     poll_transfer, (unsigned long)drv_data);
+
+	/* Load default SSP configuration */
+	dev_info(dev, "setup default SSP configuration");
+	iowrite32(0, drv_data->ioaddr + SSCR0);
+	iowrite32(SSCR1_RxTresh(RX_THRESH_DFLT) |
+		    SSCR1_TxTresh(TX_THRESH_DFLT),
+		    drv_data->ioaddr + SSCR1);
+	iowrite32(SSCR0_Motorola | SSCR0_DataSize(8), drv_data->ioaddr + SSCR0);
+	iowrite32(0, drv_data->ioaddr + SSTO);
+	iowrite32(PNWL_SSPSP, drv_data->ioaddr + SSPSP);
+
+	/* Register with the SPI framework */
+	dev_info(&pdev->dev, "register with SPI framework (as SPI%d)",
+		 master->bus_num);
+	status = spi_register_master(master);
+	if (status != 0) {
+		dev_err(dev, "problem registering driver");
+		goto err_free_4;
+	}
+
+	pci_set_drvdata(pdev, drv_data);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_idle(&pdev->dev);
+	pm_runtime_allow(dev);
+
+	return status;
+
+err_free_4:
+	free_irq(drv_data->irq, drv_data);
+err_free_3:
+	iounmap(drv_data->ioaddr);
+err_free_2:
+	pci_release_region(pdev, pci_bar);
+err_free_1:
+	destroy_workqueue(drv_data->wq);
+err_put_master:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/*
+ * intel_mid_ssp_spi_remove() - undo probe(): release IRQ, mappings and
+ * the workqueue/tasklet, unregister from the SPI core, disable the
+ * PCI function.
+ */
+static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	if (!drv_data)
+		return;
+
+	pci_set_drvdata(pdev, NULL);
+
+	free_irq(drv_data->irq, drv_data);
+
+	/* no more interrupts: safe to stop deferred work (both were
+	 * leaked by the original remove path) */
+	tasklet_kill(&drv_data->poll_transfer);
+	destroy_workqueue(drv_data->wq);
+
+	iounmap(drv_data->ioaddr);
+
+	pci_release_region(pdev, 0);
+
+	spi_unregister_master(drv_data->master);
+
+	pci_disable_device(pdev);
+}
+
+/*
+ * _pm_suspend() - take the controller from ON to @to (IDLE or OFF)
+ *
+ * for now IDLE and OFF states are treated the same
+ *
+ * Returns -EBUSY if the FIFOs still hold data (so an in-flight
+ * transfer is never cut off) and -EINVAL on an invalid source or
+ * destination power state.
+ */
+static int _pm_suspend(struct pci_dev *pdev, int to)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+	void *reg = drv_data->ioaddr;
+	int from = drv_data->pwrstate;
+	u32 sssr;
+
+
+	if (to != PWRSTATE_IDLE && to != PWRSTATE_OFF) {
+		dev_err(&pdev->dev, "ERROR: suspend: invalid dst pwrstate %x",
+			to);
+		return -EINVAL;
+	}
+
+	switch (from) {
+	case PWRSTATE_ON:
+		dev_dbg(&pdev->dev, "suspend: turn off SSP");
+		if (have_fifo_data(drv_data, &sssr)) {
+			dev_err(&pdev->dev,
+				"ERROR: suspend: i/o present! sssr:%x", sssr);
+			return -EBUSY;
+		}
+		/* stop poll-mode processing before disabling the port */
+		tasklet_disable(&drv_data->poll_transfer);
+		drv_data->pwrstate = to;
+		/* clearing SSCR0 disables the SSP (SSE bit included) */
+		iowrite32(0, reg + SSCR0);
+		dev_dbg(&pdev->dev, "suspend: cr0:%x cr1:%x sssr:%x",
+			ioread32(reg + SSCR0), ioread32(reg + SSCR1),
+			ioread32(reg + SSSR));
+		break;
+	case PWRSTATE_IDLE:
+	case PWRSTATE_OFF:
+		/* already suspended: just record the (possibly deeper) state */
+		drv_data->pwrstate = to;
+		break;
+	default:
+		dev_err(&pdev->dev, "ERROR: suspend: invalid src pwrstate %x",
+			from);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * _pm_resume() - return the controller to the ON state
+ *
+ * for now IDLE and OFF states are treated the same
+ */
+static void _pm_resume(struct pci_dev *pdev)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+	void *reg = drv_data->ioaddr;
+
+	switch (drv_data->pwrstate) {
+	default:
+		/* unexpected state: complain, then resume anyway */
+		dev_err(&pdev->dev, "ERROR: resume: invalid src pwrstate %x",
+			drv_data->pwrstate);
+		/* fall through ... */
+	case PWRSTATE_IDLE:
+	case PWRSTATE_OFF:
+		dev_dbg(&pdev->dev, "resume: turn on SSP");
+
+		/*
+		 * we don't bother reconfiguring the registers
+		 * on resume - that will get done when transfer()
+		 * is called
+		 */
+		tasklet_enable(&drv_data->poll_transfer);
+		dev_dbg(&pdev->dev, "resume: cr0:%x cr1:%x sssr:%x",
+			ioread32(reg + SSCR0), ioread32(reg + SSCR1),
+			ioread32(reg + SSSR));
+
+		drv_data->pwrstate = PWRSTATE_ON;
+		break;
+	case PWRSTATE_ON:
+		/* nothing to do */
+		break;
+	}
+}
+
+
+#ifdef CONFIG_PM
+
+/* system suspend: stop the SSP first, then let the PCI core pick and
+ * enter the sleep state */
+static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_ON)
+		dev_warn(&pdev->dev, "suspend: !on, pwrstate:%d",
+			 drv_data->pwrstate);
+	/* refuses (-EBUSY) while a transfer is still in the FIFOs */
+	retval = _pm_suspend(pdev, PWRSTATE_OFF);
+	if (retval)
+		return retval;
+	retval = pci_prepare_to_sleep(pdev);
+	if (retval) {
+		dev_err(&pdev->dev, "suspend: prepare to sleep failed");
+		return retval;
+	}
+	pci_disable_device(pdev);
+	return 0;
+}
+
+/* system resume: re-enable and wake the PCI function, then restore
+ * SSP operation */
+static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
+{
+	int retval;
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_OFF)
+		dev_warn(&pdev->dev, "resume: !off, pwrstate:%d",
+			 drv_data->pwrstate);
+	retval = pci_enable_device(pdev);
+	if (retval) {
+		/* message was a copy/paste of the one below; keep the
+		 * two failure points distinguishable in the log */
+		dev_err(&pdev->dev, "resume: enable device failed");
+		return retval;
+	}
+	retval = pci_back_from_sleep(pdev);
+	if (retval) {
+		dev_err(&pdev->dev, "resume: back from sleep failed");
+		return retval;
+	}
+	_pm_resume(pdev);
+	return 0;
+}
+
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#endif /* CONFIG_PM */
+
+
+/* runtime resume: return the PCI function to D0 and re-enable the SSP */
+static int intel_mid_ssp_spi_pm_runtime_resume(struct device *dev)
+{
+	int retval;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s called", __func__);
+	if (drv_data->pwrstate == PWRSTATE_ON)
+		return 0;	/* already running: nothing to do */
+	if (drv_data->pwrstate != PWRSTATE_IDLE)
+		dev_warn(&pdev->dev, "rt resume: !idle, pwrstate:%d",
+			 drv_data->pwrstate);
+	/* reverse of the runtime-suspend sequence below */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	retval = pci_enable_device(pdev);
+	if (retval)
+		return retval;
+	_pm_resume(pdev);
+
+	return retval;
+}
+
+/* runtime suspend: idle the SSP, then drop the PCI function to D3hot */
+static int intel_mid_ssp_spi_pm_runtime_suspend(struct device *dev)
+{
+	int retval;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_ON)
+		dev_warn(&pdev->dev, "rt suspend: !on, pwrstate:%d",
+			 drv_data->pwrstate);
+	/* refuses (-EBUSY) while a transfer is still in the FIFOs */
+	retval = _pm_suspend(pdev, PWRSTATE_IDLE);
+	if (retval)
+		return retval;
+	/* save config space and power down; wake from D3hot not armed */
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return retval;
+}
+
+/*
+ * runtime idle: schedule the real suspend 100ms out and return -EBUSY
+ * so the PM core does not suspend the device immediately.
+ */
+static int intel_mid_ssp_spi_pm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s called", __func__);
+	pm_schedule_suspend(dev, 100);
+	return -EBUSY;
+}
+
+/* runtime-PM callbacks; system sleep uses the pci_driver
+ * suspend/resume hooks below */
+static const struct dev_pm_ops intel_mid_ssp_spi_pm = {
+	.runtime_resume = intel_mid_ssp_spi_pm_runtime_resume,
+	.runtime_suspend = intel_mid_ssp_spi_pm_runtime_suspend,
+	.runtime_idle =  intel_mid_ssp_spi_pm_runtime_idle,
+};
+
+/* const data must live in __devinitconst, not __devinitdata, to avoid
+ * a section attribute conflict */
+static const struct pci_device_id pci_ids[] __devinitconst = {
+	{ PCI_VDEVICE(INTEL, 0x0816) },
+	{ }	/* terminator */
+};
+/* enable module autoloading on device match */
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* PCI glue: probe/remove plus legacy system-sleep hooks; runtime PM
+ * goes through the dev_pm_ops above */
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.driver = {
+		.pm = &intel_mid_ssp_spi_pm,
+	},
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	__devexit_p(intel_mid_ssp_spi_remove),
+	.suspend =	intel_mid_ssp_spi_suspend,
+	.resume =	intel_mid_ssp_spi_resume,
+};
+
+/*
+ * Registered via late_initcall rather than module_init -- presumably
+ * so required services (e.g. the intel_mid DMA controller driver) are
+ * up before this driver probes; TODO confirm the ordering dependency.
+ */
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+late_initcall(intel_mid_ssp_spi_init);
+
+/* module unload: unregister the PCI driver */
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+module_exit(intel_mid_ssp_spi_exit);
diff --git a/drivers/spi/intel_mid_ssp_spi_def.h b/drivers/spi/intel_mid_ssp_spi_def.h
new file mode 100644
index 0000000..4610d62
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi_def.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (C) Intel 2010
+ *  Ken Mills <ken.k.mills@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_DEF_H_
+#define INTEL_MID_SSP_SPI_DEF_H_
+
+
+/*
+ * Penwell SSP register definitions (SSCR0/SSCR1/SSSR bit fields)
+ */
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)	/* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola	      (0x0 << 4)	 /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS	      (1 << 20)	/* Extended data size select */
+#define SSCR0_NCS   (1 << 21)		/* Network clock select */
+#define SSCR0_RIM    (1 << 22)		 /* Receive FIFO overrun int mask */
+#define SSCR0_TUM   (1 << 23)		/* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)	    /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24)	/* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)		/* Audio clock select */
+#define SSCR0_MOD  (1 << 31)	       /* Mode (normal or network) */
+
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS	     (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF     (1 << 2) /* Transmit FIFO Not Full */
+#define SSSR_RNE     (1 << 3) /* Receive FIFO Not Empty */
+#define SSSR_BSY     (1 << 4) /* SSP Busy */
+#define SSSR_TFS     (1 << 5) /* Transmit FIFO Service Request */
+#define SSSR_RFS     (1 << 6) /* Receive FIFO Service Request */
+#define SSSR_ROR    (1 << 7) /* Receive FIFO Overrun */
+#define SSSR_TFL     (0x0f00) /* Transmit FIFO Level (mask) */
+#define SSSR_RFL     (0xf000) /* Receive FIFO Level (mask) */
+
+/* NOTE(review): the four defines below re-name bits already defined
+ * above (SSCR0_TIM == SSCR0_TUM bit 23; SSCR0_RIM/NCS/EDSS are
+ * duplicates) -- consider dropping one set to avoid redefinition
+ * warnings and confusion */
+#define SSCR0_TIM    (1 << 23)		 /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)		 /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS   (1 << 21)		/* Network Clock Select */
+#define SSCR0_EDSS	      (1 << 20)	/* Extended Data Size Select */
+
+#define SSCR0_TISSP	       (1 << 4) /* TI Sync Serial Protocol */
+#define SSCR0_PSP   (3 << 4) /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP	       (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE    (1 << 30)		 /* TXD Tristate Enable */
+#define SSCR1_EBCEI	       (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR (1 << 28)	       /* Slave Clock free Running */
+#define SSCR1_ECRA (1 << 27)	       /* Enable Clock Request A */
+#define SSCR1_ECRB (1 << 26)	       /* Enable Clock request B */
+#define SSCR1_SCLKDIR	     (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR	    (1 << 24)		/* Frame Direction */
+#define SSCR1_RWOT	      (1 << 23)		  /* Receive Without Transmit */
+#define SSCR1_TRAIL (1 << 22)		/* Trailing Byte */
+#define SSCR1_TSRE (1 << 21)	       /* Transmit Service Request Enable */
+#define SSCR1_RSRE (1 << 20)	       /* Receive Service Request Enable */
+#define SSCR1_TINTE (1 << 19)		/* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE	       (1 << 18) /* Trailing Byte Interrupt Enable */
+#define SSCR1_IFS		(1 << 16)	/* Invert Frame Signal */
+#define SSCR1_STRF (1 << 15)	       /* Select FIFO or EFWR */
+#define SSCR1_EFWR	      (1 << 14)		  /* Enable FIFO Write/Read */
+
+#define SSSR_BCE     (1 << 23)		 /* Bit Count Error */
+#define SSSR_CSS     (1 << 22)		 /* Clock Synchronisation Status */
+#define SSSR_TUR     (1 << 21)		 /* Transmit FIFO Under Run */
+#define SSSR_EOC    (1 << 20)		/* End Of Chain */
+#define SSSR_TINT     (1 << 19)		  /* Receiver Time-out Interrupt */
+#define SSSR_PINT    (1 << 18)	    /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT (1 << 25)	       /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23)	     /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x) ((x) << 16)	   /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)	      /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)	      /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)	       /* Start Delay */
+#define SSPSP_ETDS	      (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP	     (1 << 2) /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)	  ((x) << 0)	       /* Serial Bit Rate Clock Mode */
+
+/* register offsets from the mapped SSP base */
+#define SSCR0	0x00
+#define SSCR1	0x04
+#define SSSR	0x08
+#define SSITR	0x0c
+#define SSDR	0x10
+#define SSTO	0x28
+#define SSPSP	0x2c
+/* offset of the DMA-selector SYSCFG register below the SSP base */
+#define SYSCFG	0x20bc0
+
+/* SSP assignment configuration from PCI config */
+#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
+#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
+#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	u8 tx_threshold;	/* TX FIFO service-request threshold */
+	u8 rx_threshold;	/* RX FIFO service-request threshold */
+	u8 dma_burst_size;	/* presumably DMA burst length -- unused in this file */
+	u32 timeout;		/* receiver timeout, written to SSTO */
+	u16 extra_data[5];	/* board-specific spare words -- unused in this file */
+};
+
+
+#endif /* INTEL_MID_SSP_SPI_DEF_H_ */
-- 
1.7.2.3


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
       [not found] <[PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011>
  2011-02-02 21:01 ` [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011 Russ Gorby
  2011-02-02 21:01 ` Russ Gorby
@ 2011-02-02 21:01 ` Russ Gorby
  2011-02-02 21:01 ` Russ Gorby
  3 siblings, 0 replies; 13+ messages in thread
From: Russ Gorby @ 2011-02-02 21:01 UTC (permalink / raw)
  To: David Brownell, Grant Likely,
	spi-devel-general-5NWGOfrQmneRv+LV9MX5uuRhgaa4a2kL

SPI master controller driver for the Intel MID platform Medfield
This driver uses the Penwell SSP controller and configures it to
be a SPI device (spibus 3). This bus supports a single device -
the 3G SPI modem that can operate up to 25Mhz.

Signed-off-by: Russ Gorby <russ.gorby-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
 drivers/spi/Kconfig                 |    7 +
 drivers/spi/Makefile                |    1 +
 drivers/spi/intel_mid_ssp_spi.c     | 1507 +++++++++++++++++++++++++++++++++++
 drivers/spi/intel_mid_ssp_spi_def.h |  139 ++++
 4 files changed, 1654 insertions(+), 0 deletions(-)
 create mode 100644 drivers/spi/intel_mid_ssp_spi.c
 create mode 100644 drivers/spi/intel_mid_ssp_spi_def.h

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9..60ba339 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -178,6 +178,13 @@ config SPI_IMX
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel Medfield platform"
+	depends on SPI_MASTER && INTEL_MID_DMAC
+	help
+	  This is the SPI master controller driver for the Intel
+	  Medfield MID platform.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f..c64deb9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_SPI_SH_SCI)		+= spi_sh_sci.o
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi_stmp.o
 obj-$(CONFIG_SPI_NUC900)		+= spi_nuc900.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)         += intel_mid_ssp_spi.o
 
 # special build for s3c24xx spi driver with fiq support
 spi_s3c24xx_hw-y			:= spi_s3c24xx.o
diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
new file mode 100644
index 0000000..19c62bc
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.c
@@ -0,0 +1,1507 @@
+/*
+ *  intel_mid_ssp_spi.c - Penwell SPI master controller driver
+ *  based on pxa2xx.c
+ *
+ *  Copyright (C) Intel 2010
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *  Russ Gorby <russ.gorby-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_qos_params.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include "intel_mid_ssp_spi_def.h"
+
+#define DRIVER_NAME		"intel_mid_ssp_spi"
+#define PCI_DMAC_MAXDI		2047
+#define PCI_DMAC_ID		0x0827
+/* PM QoS define */
+#define MIN_EXIT_LATENCY	20
+
+#define TESTMODE_COMMON_MASK	0x00ff
+#define TESTMODE_PRIV_MASK	0xff00
+#define TESTMODE_ENABLE_DMA	0x01
+#define TESTMODE_ENABLE_POLL	0x02
+#define TESTMODE_ENABLE_LOOPBACK 0x04
+#define TESTMODE_ENABLE_INTR	0x08
+#define TESTMODE(x)		(testmode & x)
+/* default test mode: DMA and poll-mode transfers enabled */
+static unsigned int testmode = (TESTMODE_ENABLE_DMA | TESTMODE_ENABLE_POLL);
+
+module_param(testmode, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(testmode, "supply test mode bits");
+
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Penwell SPI3 Master Controller");
+MODULE_LICENSE("GPL");
+
+#define RX_THRESH_DFLT		8
+#define TX_THRESH_DFLT		8
+#define TIMOUT_DFLT		1000
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define PNWL_SSPSP (SSPSP_FSRT | SSPSP_SFRMWDTH(1) | SSPSP_SFRMP | \
+		    SSPSP_SCMODE(3))
+
+/*
+ * clock divider
+ * 8 bpw
+ * TUR/ROR do not generate interrupt
+ * SPI mode operation
+ * SSP enabled
+ * note: the spi and chip macro arguments are currently unused
+ */
+#define PNWL_CR0(clk, bits, spi, chip)	\
+	((SSCR0_SerClkDiv(clk) & SSCR0_SCR) |				\
+	 SSCR0_Motorola |						\
+	 SSCR0_DataSize(bits > 16 ? bits - 16 : bits) |			\
+	 SSCR0_SSE |							\
+	 SSCR0_TIM |							\
+	 SSCR0_RIM |							\
+	 (bits > 16 ? SSCR0_EDSS : 0))
+
+#define PNWL_CR1_MASTER_ROLE	0
+#define PNWL_CR1_SLAVE_ROLE	(SSCR1_SFRMDIR | SSCR1_SCLKDIR)
+/* MRST SSP must be slave */
+#define PNWL_CR1_ROLE		PNWL_CR1_MASTER_ROLE
+#define PNWL_CR1(spi, chip)	\
+	  ((chip->enable_loopback ? SSCR1_LBM : 0) | \
+	  ((spi->mode & SPI_CPHA) ? SSCR1_SPH : 0) | \
+	  ((spi->mode & SPI_CPOL) ? SSCR1_SPO : 0) | \
+	  SSCR1_SCFR | \
+	  chip->threshold | \
+	  PNWL_CR1_ROLE)
+
+
+
+/* links a DMA completion callback to its driver context and done flag */
+struct callback_param {
+	void *drv_data;	/* owning struct driver_data */
+	int *donep;	/* completion flag to set (txdma_done/rxdma_done) */
+};
+
+/* power states tracked in driver_data->pwrstate */
+enum dd_pwrstate {
+	PWRSTATE_ON = 1,
+	PWRSTATE_IDLE,
+	PWRSTATE_OFF,
+};
+
+/* bit numbers for driver_data->pwrflags */
+enum dd_pwrflags {
+	PWRFLAG_RTRESUMING,
+};
+
+/* per-controller runtime state, stored as spi_master devdata */
+struct driver_data {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;	/* physical base (PCI BAR 0) */
+	void __iomem *ioaddr;	/* mapped register base */
+	u32 iolen;
+	int irq;
+
+	/* SSP masks*/
+	u32 dma_cr1;
+	u32 int_cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+
+	/* Current message transfer state info */
+	struct tasklet_struct poll_transfer;
+	struct spi_message *cur_msg;
+	size_t len;
+	void *tx;	/* running TX cursor, advances toward tx_end */
+	void *tx_end;
+	void *rx;	/* running RX cursor, advances toward rx_end */
+	void *rx_end;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	size_t rx_map_len;
+	size_t tx_map_len;
+	u8 n_bytes;	/* bytes per word for the current transfer */
+	int (*write)(struct driver_data *drv_data);
+	int (*read)(struct driver_data *drv_data);
+	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+	void (*cs_control)(u32 command);
+	struct workqueue_struct *wq;
+	struct work_struct resume_transfer_work;
+
+	/* controller state */
+	int dma_inited;
+
+	/* pwrstate mgmt */
+	int pwrstate;		/* enum dd_pwrstate */
+	unsigned long pwrflags;	/* enum dd_pwrflags */
+
+	/* used by DMA code */
+	struct pci_dev *dmac1;
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan	   *txchan;
+	struct dma_chan	   *rxchan;
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+};
+
+/* per-SPI-device state computed in setup(), consumed by the transfer path */
+struct chip_data {
+	u32 cr0;	/* precomputed SSCR0 image */
+	u32 cr1;	/* precomputed SSCR1 image (without int/dma enables) */
+	u32 psp;
+	u32 timeout;	/* receiver timeout, written to SSTO */
+	u8 n_bytes;	/* bytes per word (1, 2 or 4) */
+	u32 threshold;	/* SSCR1 RX/TX FIFO threshold bits */
+	u8 enable_dma;		/* use dma if possible */
+	u8 poll_mode;		/* use poll mode */
+	u8 enable_loopback;	/* configure in loopback mode */
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct driver_data *drv_data);
+	int (*read)(struct driver_data *drv_data);
+};
+
+static int transfer(struct spi_device *, struct spi_message *);
+
+/*
+ * have_fifo_data() - test whether either FIFO still holds data
+ * @drv_data:	controller state
+ * @sssrp:	optional out-param receiving the raw SSSR value
+ *
+ * Returns nonzero when the TX FIFO is non-empty or the RX FIFO holds
+ * data.  Use __iomem-annotated pointer for register access (sparse).
+ */
+static inline int have_fifo_data(struct driver_data *drv_data, u32 *sssrp)
+{
+	u32 sssr;
+	void __iomem *reg = drv_data->ioaddr;
+
+	sssr = ioread32(reg + SSSR);
+	if (sssrp)
+		*sssrp = sssr;
+	return ((sssr & SSSR_TFL) || !(sssr & SSSR_TNF)) ||
+		((sssr & SSSR_RFL) != SSSR_RFL || (sssr & SSSR_RNE));
+}
+
+/*
+ * flush() - clear sticky FIFO error status before a new transfer
+ * @drv_data:	controller state
+ *
+ * If either FIFO unexpectedly holds residue the SSP is disabled
+ * instead of drained; otherwise the overrun/underrun status bits are
+ * cleared (write-to-clear).
+ */
+static void flush(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* was void *: keep __iomem */
+	u32 sssr;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (have_fifo_data(drv_data, &sssr)) {
+		dev_warn(&drv_data->pdev->dev,
+			 "ERROR: flush: fifos not empty! sssr:%x", sssr);
+		iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
+		return;
+	}
+
+	iowrite32(SSSR_ROR, reg + SSSR);
+	iowrite32(SSSR_TUR, reg + SSSR);
+}
+
+/*
+ * reader/writer functions
+ *
+ * *_reader functions return:
+ *	0: not complete (data not available)
+ *	1: *all* requested data has been read
+ *
+ * *_writer functions return:
+ *	1: data successfully written
+ *	0: *all* requested data already written *or* full condition hit
+ *	note: this means caller must verify write-complete condition
+ *
+ */
+/*
+ * null_writer() - feed one zero word to the TX FIFO (TX data discarded)
+ *
+ * Returns 1 when a word was written, 0 when the FIFO is full or all
+ * requested data has already been written (caller checks completion).
+ */
+static int null_writer(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+	u8 n_bytes = drv_data->n_bytes;
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(0, reg + SSDR);
+	drv_data->tx += n_bytes;
+
+	return 1;
+}
+
+/*
+ * null_reader() - drain the RX FIFO, discarding the data
+ *
+ * Returns 1 when all requested data has been consumed, 0 otherwise.
+ */
+static int null_reader(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+	u8 n_bytes = drv_data->n_bytes;
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE) &&
+	       (drv_data->rx < drv_data->rx_end)) {
+		ioread32(reg + SSDR);
+		drv_data->rx += n_bytes;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/*
+ * u8_writer() - push one byte into the TX FIFO
+ *
+ * Returns 1 when a byte was written, 0 when the FIFO is full or all
+ * requested data has already been written.
+ */
+static int u8_writer(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(*(u8 *)(drv_data->tx), reg + SSDR);
+	dev_dbg(&drv_data->pdev->dev, "u8_write: %x", ((u8 *)drv_data->tx)[0]);
+	drv_data->tx++;
+
+	return 1;
+}
+
+/*
+ * u8_reader() - drain available RX FIFO data one byte at a time
+ *
+ * Returns 1 when all requested data has been read, 0 otherwise.
+ */
+static int u8_reader(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+		*(u8 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx++;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/*
+ * u16_writer() - push one 16-bit word into the TX FIFO
+ *
+ * Returns 1 when a word was written, 0 when the FIFO is full or all
+ * requested data has already been written.
+ */
+static int u16_writer(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(*(u16 *)(drv_data->tx), reg + SSDR);
+	drv_data->tx += 2;
+
+	return 1;
+}
+
+/*
+ * u16_reader() - drain available RX FIFO data 16 bits at a time
+ *
+ * Returns 1 when all requested data has been read, 0 otherwise.
+ */
+static int u16_reader(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	dev_dbg(&drv_data->pdev->dev, "u16_read");
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+		*(u16 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx += 2;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+/*
+ * u32_writer() - push one 32-bit word into the TX FIFO
+ *
+ * Returns 1 when a word was written, 0 when the FIFO is full or all
+ * requested data has already been written.
+ */
+static int u32_writer(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	dev_dbg(&drv_data->pdev->dev, "u32_write");
+
+	if (((ioread32(reg + SSSR) & SSSR_TFL) == SSSR_TFL)
+	    || (drv_data->tx == drv_data->tx_end))
+		return 0;
+
+	iowrite32(*(u32 *)(drv_data->tx), reg + SSDR);
+	drv_data->tx += 4;
+
+	return 1;
+}
+
+/*
+ * u32_reader() - drain available RX FIFO data 32 bits at a time
+ *
+ * Returns 1 when all requested data has been read, 0 otherwise.
+ */
+static int u32_reader(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;	/* __iomem for sparse */
+
+	while ((ioread32(reg + SSSR) & SSSR_RNE)
+	       && (drv_data->rx < drv_data->rx_end)) {
+		*(u32 *)(drv_data->rx) = ioread32(reg + SSDR);
+		drv_data->rx += 4;
+	}
+
+	return drv_data->rx == drv_data->rx_end;
+}
+
+
+/*
+ * Hand the finished message back to its owner.  The caller has set
+ * msg->status already, and both DMA and PIO interrupts are blocked.
+ */
+static void giveback(struct driver_data *drv_data)
+{
+	struct spi_message *msg = drv_data->cur_msg;
+
+	drv_data->cur_msg = NULL;
+	msg->state = NULL;
+	if (msg->complete)
+		msg->complete(msg->context);
+}
+
+/* dma_request_channel() filter: accept only channels belonging to the
+ * DMA controller (dmac1) looked up in intel_mid_ssp_spi_dma_init(). */
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct driver_data *drv_data = param;
+
+	if (!drv_data->dmac1)
+		return false;
+
+	return chan->device->dev == &drv_data->dmac1->dev;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_data:		Pointer to the private driver data
+ * @msg:		Message the buffers were mapped for
+ *
+ * Buffers mapped by the SPI client itself (msg->is_dma_mapped) are left
+ * alone; only mappings created by map_dma_buffers() are torn down here.
+ */
+static void unmap_dma_buffers(struct driver_data *drv_data,
+			      struct spi_message *msg)
+{
+	struct device *dev = &drv_data->pdev->dev;
+
+	if (unlikely(!drv_data->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers not mapped");
+		return;
+	}
+	if (unlikely(msg->is_dma_mapped))
+		return;
+
+	dma_unmap_single(dev, drv_data->rx_dma, drv_data->len, DMA_FROM_DEVICE);
+	dma_unmap_single(dev, drv_data->tx_dma, drv_data->len, DMA_TO_DEVICE);
+	drv_data->dma_mapped = 0;
+}
+
+
+/*
+ * DMA engine completion callback, shared by the TX and RX descriptors.
+ * Each invocation marks its own direction as done through param->donep;
+ * only the invocation that sees both directions finished performs the
+ * end-of-transfer housekeeping and completes the message.
+ */
+static void dma_transfer_complete(void *arg)
+{
+	struct callback_param *param = arg;
+	struct driver_data *drv_data = (struct driver_data *)param->drv_data;
+	int *done;
+	void *reg;
+	u32 sscr1;
+
+	done = (int *)param->donep;
+	reg = drv_data->ioaddr;
+	*done = 1;
+
+	/* Wait for the other direction; the last callback finishes up */
+	if (!drv_data->txdma_done || !drv_data->rxdma_done)
+		return;
+
+	/* Clear Status Register */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+
+	sscr1 = ioread32(reg + SSCR1);
+
+	/* Disable Triggers to DMA */
+	sscr1 &= ~drv_data->dma_cr1;
+
+	/* Disable Interrupt */
+	sscr1 &= ~drv_data->int_cr1;
+	iowrite32(sscr1, reg + SSCR1);
+
+	/* Stop getting Time Outs */
+	iowrite32(0, reg + SSTO);
+
+	/* release DMA mappings */
+	unmap_dma_buffers(drv_data, drv_data->cur_msg);
+
+	/* Update total byte transfered return count actual bytes read */
+	drv_data->cur_msg->actual_length = drv_data->len;
+
+	drv_data->cur_msg->status = 0;
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_data:		Pointer to the private driver data
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources: it looks up the DMA controller PCI device (dmac1) and
+ * requests one RX and one TX channel from it.  On failure the driver
+ * falls back to non-DMA operation (dma_inited stays 0).
+ */
+static void intel_mid_ssp_spi_dma_init(struct driver_data *drv_data)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	dma_cap_mask_t mask;
+
+	/* Idempotent: later setup() calls are no-ops */
+	if (drv_data->dma_inited)
+		return;
+
+	drv_data->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DMAC_ID,
+					 NULL);
+	if (!drv_data->dmac1) {
+		dev_warn(&drv_data->pdev->dev, "Can't find DMAC %x",
+			 PCI_DMAC_ID);
+		return;
+	}
+
+	/* 1. init rx channel (peripheral-to-memory, HW handshake) */
+	rxs = &drv_data->dmas_rx;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	rxs->dma_slave.direction = DMA_FROM_DEVICE;
+	rxs->dma_slave.src_maxburst = LNW_DMA_MSIZE_8;
+	rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	rxs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* chan_filter restricts the search to channels of dmac1 */
+	drv_data->rxchan = dma_request_channel(mask, chan_filter, drv_data);
+	if (!drv_data->rxchan)
+		goto err_exit;
+	drv_data->rxchan->private = rxs;
+
+	/* 2. init tx channel (memory-to-peripheral, HW handshake) */
+	txs = &drv_data->dmas_tx;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	txs->dma_slave.direction = DMA_TO_DEVICE;
+	txs->dma_slave.dst_maxburst = LNW_DMA_MSIZE_8;
+	txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	txs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	drv_data->txchan = dma_request_channel(mask, chan_filter, drv_data);
+	if (!drv_data->txchan)
+		goto free_rxchan;
+	drv_data->txchan->private = txs;
+
+	/* Both directions start out idle (done) */
+	drv_data->dma_inited = 1;
+	drv_data->txdma_done = 1;
+	drv_data->rxdma_done = 1;
+
+	/* Callback cookies passed to dma_transfer_complete() */
+	drv_data->tx_param.drv_data = (void *)drv_data;
+	drv_data->tx_param.donep = &drv_data->txdma_done;
+	drv_data->rx_param.drv_data = (void *)drv_data;
+	drv_data->rx_param.donep = &drv_data->rxdma_done;
+	return;
+
+free_rxchan:
+	dev_err(&drv_data->pdev->dev, "DMA TX Channel Not available");
+	dma_release_channel(drv_data->rxchan);
+err_exit:
+	dev_err(&drv_data->pdev->dev, "DMA RX Channel Not available");
+	pci_dev_put(drv_data->dmac1);
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_data:	Pointer to the private driver data
+ */
+static void intel_mid_ssp_spi_dma_exit(struct driver_data *drv_data)
+{
+	if (!drv_data->dma_inited)
+		return;
+
+	/* Undo intel_mid_ssp_spi_dma_init() in reverse order */
+	dma_release_channel(drv_data->txchan);
+	dma_release_channel(drv_data->rxchan);
+	pci_dev_put(drv_data->dmac1);
+	drv_data->dma_inited = 0;
+}
+
+/*
+ * dma_transfer() - Start a transfer using the DMA engine.
+ * @drv_data:	Pointer to the private driver data
+ *
+ * Prepares (and submits) one memcpy descriptor per direction between
+ * the mapped buffers and the SSP data register, with
+ * dma_transfer_complete() as completion callback for each.
+ */
+static void dma_transfer(struct driver_data *drv_data)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct dma_slave_config *txconf, *rxconf;
+	struct device *dev = &drv_data->pdev->dev;
+
+	/* get Data Read/Write address (SSDR is at offset 0x10) */
+	ssdr_addr = (dma_addr_t)(drv_data->paddr + 0x10);
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	/* 2. Prepare the RX dma transfer	- DMA_FROM_DEVICE */
+	if (drv_data->rx_dma) {
+		rxconf = &drv_data->dmas_rx.dma_slave;
+		rxconf->src_addr = drv_data->rx_dma;
+		rxchan = drv_data->rxchan;
+		rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+					       (unsigned long) rxconf);
+		rxdesc = rxchan->device->device_prep_dma_memcpy
+			(rxchan,		/* DMA Channel */
+			 drv_data->rx_dma,	/* DAR */
+			 ssdr_addr,		/* SAR */
+			 drv_data->len,		/* Data Length */
+			 flag);			/* Flag */
+		if (!rxdesc) {
+			dev_err(dev, "ERROR : rxdesc is null!");
+			return;
+		}
+		rxdesc->callback = dma_transfer_complete;
+		rxdesc->callback_param = &drv_data->rx_param;
+		drv_data->rxdma_done = 0;
+	}
+
+	/* 3. Prepare the TX dma transfer	-  DMA_TO_DEVICE */
+	if (drv_data->tx_dma) {
+		txconf = &drv_data->dmas_tx.dma_slave;
+		/* Was rx_dma: copy-paste bug — the TX direction must use
+		 * the TX buffer, mirroring the RX path above.  (NOTE:
+		 * actual addresses are supplied to prep_dma_memcpy below;
+		 * verify whether the dmac consumes this field at all.) */
+		txconf->dst_addr = drv_data->tx_dma;
+		txchan = drv_data->txchan;
+		txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+					       (unsigned long) txconf);
+		txdesc = txchan->device->device_prep_dma_memcpy
+			(txchan,		/* DMA Channel */
+			 ssdr_addr,		/* DAR */
+			 drv_data->tx_dma,	/* SAR */
+			 drv_data->len,		/* Data Length */
+			 flag);			/* Flag */
+		if (!txdesc) {
+			dev_err(dev, "ERROR : txdesc is null!");
+			return;
+		}
+		txdesc->callback = dma_transfer_complete;
+		txdesc->callback_param = &drv_data->tx_param;
+		drv_data->txdma_done = 0;
+	}
+
+	/* Kick off RX first so it is ready before TX starts feeding */
+	if (rxdesc)
+		rxdesc->tx_submit(rxdesc);
+	if (txdesc)
+		txdesc->tx_submit(txdesc);
+
+}
+
+
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @drv_data:		Pointer to the private driver data
+ * @msg:		Message being transferred
+ * @transfer:		Transfer whose buffers are to be mapped
+ *
+ * Returns 1 when DMA can be used for this transfer, 0 to fall back to
+ * programmed I/O (mapping failed or the transfer is too long).
+ */
+static int map_dma_buffers(struct driver_data *drv_data,
+			   struct spi_message *msg,
+			   struct spi_transfer *transfer)
+{
+	struct device *dev = &drv_data->pdev->dev;
+
+	if (unlikely(drv_data->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped");
+		return 0;
+	}
+	/* Client already provided bus addresses; nothing to map */
+	if (unlikely(msg->is_dma_mapped)) {
+		drv_data->rx_dma = transfer->rx_dma;
+		drv_data->tx_dma = transfer->tx_dma;
+		return 1;
+	}
+	if (drv_data->len > PCI_DMAC_MAXDI * drv_data->n_bytes) {
+		/* if length is too long we revert to programmed I/O */
+		return 0;
+	}
+
+	if (likely(drv_data->rx)) {
+		drv_data->rx_dma =
+			dma_map_single(dev, drv_data->rx,
+				       drv_data->len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_data->rx_dma))) {
+			dev_err(dev, "ERROR : rx dma mapping failed");
+			return 0;
+		}
+	}
+	if (likely(drv_data->tx)) {
+		drv_data->tx_dma =
+			dma_map_single(dev, drv_data->tx,
+				       drv_data->len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_data->tx_dma))) {
+			/* Only undo the rx mapping if one was created;
+			 * rx_dma is stale when drv_data->rx is NULL. */
+			if (drv_data->rx)
+				dma_unmap_single(dev, drv_data->rx_dma,
+						 drv_data->len,
+						 DMA_FROM_DEVICE);
+			dev_err(dev, "ERROR : tx dma mapping failed");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/* Pick the DMA bus width matching the SPI word size (in bits) and
+ * apply it to the RX source and TX destination slave configs. */
+static void set_dma_width(struct driver_data *drv_data, int bits)
+{
+	enum dma_slave_buswidth width;
+
+	if (bits <= 8)
+		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	else if (bits <= 16)
+		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	else if (bits <= 32)
+		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	else
+		return;		/* >32 bits: leave the configs untouched */
+
+	drv_data->dmas_rx.dma_slave.src_addr_width = width;
+	drv_data->dmas_tx.dma_slave.dst_addr_width = width;
+}
+
+/*
+ * Error path for interrupt-driven transfers: quiesce the SSP (clear
+ * status, mask interrupts, stop timeouts, flush FIFOs) and log @msg.
+ */
+static void int_error_stop(struct driver_data *drv_data, const char* msg)
+{
+	void *reg = drv_data->ioaddr;
+
+	/* Stop and reset SSP */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+	iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1, reg + SSCR1);
+	iowrite32(0, reg + SSTO);
+	flush(drv_data);
+
+	dev_err(&drv_data->pdev->dev, "%s", msg);
+
+}
+
+/*
+ * Completion path for interrupt-driven transfers: quiesce the SSP,
+ * account for the bytes actually received and complete the message.
+ */
+static void int_transfer_complete(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u32 sscr1;
+
+	dev_dbg(&drv_data->pdev->dev, "interrupt transfer complete");
+	/* Clear Status Register */
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+
+	sscr1 = ioread32(reg + SSCR1);
+
+	/* Disable Triggers to DMA */
+	sscr1 &= ~drv_data->dma_cr1;
+
+	/* Disable Interrupt */
+	sscr1 &= ~drv_data->int_cr1;
+	iowrite32(sscr1, reg + SSCR1);
+
+	/* Stop getting Time Outs */
+	iowrite32(0, reg + SSTO);
+
+	/* Update total byte transfered return count actual bytes read */
+	drv_data->cur_msg->actual_length += drv_data->len -
+		(drv_data->rx_end - drv_data->rx);
+
+	drv_data->cur_msg->status = 0;
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/* Completion path for polled transfers: account for the bytes received,
+ * complete the message and drop the runtime-PM reference. */
+static void transfer_complete(struct driver_data *drv_data)
+{
+	struct spi_message *msg = drv_data->cur_msg;
+
+	/* Update total byte transfered return count actual bytes read */
+	msg->actual_length += drv_data->len -
+		(drv_data->rx_end - drv_data->rx);
+	msg->status = 0;
+
+	giveback(drv_data);
+	pm_runtime_put(&drv_data->pdev->dev);
+}
+
+/*
+ * Interrupt-mode transfer handler: services FIFO events until the
+ * receive buffer is full, then completes the message.  TFS is only
+ * considered while TX interrupts (SSCR1_TIE) are still enabled.
+ */
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+	void *reg = drv_data->ioaddr;
+	u32 irq_mask = (ioread32(reg + SSCR1) & SSCR1_TIE) ?
+		drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+
+	u32 irq_status = ioread32(reg + SSSR) & irq_mask;
+	if (irq_status & SSSR_ROR) {
+		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+		return IRQ_HANDLED;
+	}
+
+	/* Receiver timeout: drain whatever trailing bytes arrived */
+	if (irq_status & SSSR_TINT) {
+		iowrite32(SSSR_TINT, reg + SSSR);
+		if (drv_data->read(drv_data)) {
+			int_transfer_complete(drv_data);
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* Drain rx fifo, Fill tx fifo and prevent overruns */
+	do {
+		if (drv_data->read(drv_data)) {
+			int_transfer_complete(drv_data);
+			return IRQ_HANDLED;
+		}
+	} while (drv_data->write(drv_data));
+
+	if (drv_data->read(drv_data)) {
+		int_transfer_complete(drv_data);
+		return IRQ_HANDLED;
+	}
+
+	/* All TX data queued: stop taking TX-FIFO-service interrupts */
+	if (drv_data->tx == drv_data->tx_end)
+		iowrite32(ioread32(reg + SSCR1) & ~SSCR1_TIE, reg + SSCR1);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Top-level (shared) interrupt handler.  Dispatches to the current
+ * transfer handler, after filtering out DMA-mode over/underruns and
+ * interrupts that are not ours.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct driver_data *drv_data = dev_id;
+	void *reg = drv_data->ioaddr;
+	u32 status = ioread32(reg + SSSR);
+
+	if (status & (SSSR_ROR | SSSR_TUR)) {
+		dev_dbg(&drv_data->pdev->dev,
+			"--- SPI ROR or TUR Occred : SSSR=%x", status);
+
+		/* In DMA mode just acknowledge; the DMA callback handles
+		 * transfer completion. */
+		if (drv_data->dma_mapped) {
+			iowrite32(SSSR_ROR, reg + SSSR);	/* Clear ROR */
+			iowrite32(SSSR_TUR, reg + SSSR);	/* Clear TUR */
+			return IRQ_HANDLED;
+		}
+	}
+
+	/* just return if this is not our interrupt */
+	if (!(ioread32(reg + SSSR) & drv_data->mask_sr))
+		return IRQ_NONE;
+
+	/* Spurious interrupt with no message in flight: shut the port
+	 * down and clear the status so it cannot re-fire. */
+	if (!drv_data->cur_msg) {
+		iowrite32(ioread32(reg + SSCR0) & ~SSCR0_SSE, reg + SSCR0);
+		iowrite32(ioread32(reg + SSCR1) & ~drv_data->int_cr1,
+			reg + SSCR1);
+		iowrite32(drv_data->clear_sr, reg + SSSR);
+
+		/* Never fail */
+
+		return IRQ_HANDLED;
+	}
+
+	return drv_data->transfer_handler(drv_data);
+}
+
+/* Tasklet body for poll-mode transfers: pump the TX buffer out while
+ * draining RX, then busy-wait for the remaining RX data. */
+static void poll_transfer(unsigned long data)
+{
+	struct driver_data *drv_data = (struct driver_data *)data;
+
+	if (drv_data->tx) {
+		/* Interleave reads with writes to avoid RX overruns */
+		while (drv_data->tx != drv_data->tx_end) {
+			drv_data->write(drv_data);
+			drv_data->read(drv_data);
+		}
+	}
+
+	while (!drv_data->read(drv_data))
+		cpu_relax();
+
+	transfer_complete(drv_data);
+}
+
+/*
+ * ssp_get_clk_div() - Compute the SSP clock divisor for @speed (Hz).
+ *
+ * The divisor is floored at 4 so the SSP clock never exceeds 25MHz.
+ */
+static unsigned int ssp_get_clk_div(int speed)
+{
+	u32 clk_div;
+
+	/*
+	 * fabric clock: 100MHz
+	 * SSP clock: 25MHz max
+	 */
+	if (unlikely(speed <= 0))
+		speed = 1;	/* guard against divide-by-zero on bogus speed */
+	clk_div = max(100000000 / speed, 4);
+
+	return clk_div;
+}
+
+/**
+ * resume_transfer_work()	- resume from pm_runtime sleep then
+ *		perform transfer() work
+ *
+ * Queued by transfer() when the controller is runtime-suspended; the
+ * synchronous pm_runtime_get brings the device back up before the
+ * deferred message is re-submitted to transfer().
+ */
+static void resume_transfer_work(struct work_struct *work)
+{
+	struct driver_data *drv_data = container_of(work, struct driver_data,
+						    resume_transfer_work);
+	struct spi_message *msg;
+
+	pm_runtime_get_sync(&drv_data->pdev->dev);
+	WARN_ON(drv_data->pwrstate != PWRSTATE_ON);
+	/* Detach the deferred message before re-submitting it */
+	msg = drv_data->cur_msg;
+	drv_data->cur_msg = NULL;
+	transfer(msg->spi, msg);
+}
+
+/*
+ * transfer() - spi_master->transfer hook: queue/execute one message.
+ *
+ * Only the first spi_transfer of the message is processed (see comment
+ * below).  Depending on chip configuration the transfer runs via DMA,
+ * polled I/O (tasklet) or interrupt-driven programmed I/O.  Returns 0
+ * on acceptance, -EIO when powered off, -EBUSY on a resume race.
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = drv_data->ioaddr;
+	int truth;
+	u8 bits;
+	u32 clk_div;
+	u32 speed;
+	u32 cr0;
+	u32 cr1;
+
+	if (unlikely(drv_data->pwrstate == PWRSTATE_OFF)) {
+		dev_dbg(&drv_data->pdev->dev, "transfer: busy, pwrstate:%d",
+			drv_data->pwrstate);
+		return -EIO;
+	}
+	/* Runtime-suspended: defer the message to the resume workqueue */
+	if (unlikely(drv_data->pwrstate == PWRSTATE_IDLE)) {
+		truth = test_and_set_bit(PWRFLAG_RTRESUMING,
+					 &drv_data->pwrflags);
+		if (truth > 0) {
+			WARN_ON(1);
+			return -EBUSY;
+		}
+		drv_data->cur_msg = msg;
+		queue_work(drv_data->wq, &drv_data->resume_transfer_work);
+		return 0;
+	}
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	drv_data->cur_msg = msg;
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next, struct spi_transfer,
+			      transfer_list);
+
+	/* Setup the transfer state based on the type of transfer */
+	/* Skip the extra PM reference when re-entered from
+	 * resume_transfer_work(), which already took one. */
+	if (likely(!test_and_clear_bit(PWRFLAG_RTRESUMING,
+				       &drv_data->pwrflags)))
+		pm_runtime_get(&drv_data->pdev->dev);
+	chip = spi_get_ctldata(msg->spi);
+	flush(drv_data);
+	drv_data->n_bytes = chip->n_bytes;
+	drv_data->tx = (void *)transfer->tx_buf;
+	drv_data->tx_end = drv_data->tx + transfer->len;
+	drv_data->rx = transfer->rx_buf;
+	drv_data->rx_end = drv_data->rx + transfer->len;
+	drv_data->rx_dma = transfer->rx_dma;
+	drv_data->tx_dma = transfer->tx_dma;
+	drv_data->len = transfer->len;
+	/* Missing buffers fall back to the null reader/writer stubs */
+	drv_data->write = drv_data->tx ? chip->write : null_writer;
+	drv_data->read = drv_data->rx ? chip->read : null_reader;
+
+	/* Change speed and bit per word on a per transfer */
+	cr0 = chip->cr0;
+	if (transfer->speed_hz || transfer->bits_per_word) {
+
+		bits = chip->bits_per_word;
+		speed = chip->speed_hz;
+
+		if (transfer->speed_hz)
+			speed = transfer->speed_hz;
+
+		clk_div = ssp_get_clk_div(speed);
+
+		if (transfer->bits_per_word)
+			bits = transfer->bits_per_word;
+
+		/* Re-select the reader/writer for the overridden word
+		 * size, preserving any null_* fallback chosen above. */
+		if (bits <= 8) {
+			drv_data->n_bytes = 1;
+			drv_data->read = drv_data->read != null_reader ?
+				u8_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u8_writer : null_writer;
+		} else if (bits <= 16) {
+			drv_data->n_bytes = 2;
+			drv_data->read = drv_data->read != null_reader ?
+				u16_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u16_writer : null_writer;
+		} else if (bits <= 32) {
+			drv_data->n_bytes = 4;
+			drv_data->read = drv_data->read != null_reader ?
+				u32_reader : null_reader;
+			drv_data->write = drv_data->write != null_writer ?
+				u32_writer : null_writer;
+		}
+		if (likely(chip->enable_dma))
+			set_dma_width(drv_data, bits);
+		cr0 = PNWL_CR0(clk_div, bits, spi, chip);
+	}
+
+	/* try to map dma buffer and do a dma transfer if successful */
+	if (likely(chip->enable_dma))
+		drv_data->dma_mapped = map_dma_buffers(drv_data, msg, transfer);
+	else {
+		WARN_ON(drv_data->dma_mapped != 0);
+		drv_data->dma_mapped = 0;
+	}
+
+	drv_data->transfer_handler = interrupt_transfer;
+	iowrite32(drv_data->clear_sr, reg + SSSR);
+	cr1 = chip->cr1;
+	iowrite32(chip->timeout, reg + SSTO);
+
+	/* Enable either DMA service requests or PIO interrupts */
+	if (likely(drv_data->dma_mapped))
+		cr1 |= drv_data->dma_cr1;
+	else if (!chip->poll_mode)
+		cr1 |= drv_data->int_cr1;
+
+	dev_dbg(&drv_data->pdev->dev,
+		"%s drv_data:%p len:%d n_bytes:%d cr0:%x cr1:%x",
+		(drv_data->dma_mapped ? "DMA io:" :
+		 (chip->poll_mode ? "Poll io:" : "Intr io:")),
+		drv_data, drv_data->len, drv_data->n_bytes, cr0, cr1);
+
+	/* see if we need to reload the config registers */
+	truth = ioread32(reg + SSCR0) != cr0 ||
+		((ioread32(reg + SSCR1) & SSCR1_CHANGE_MASK) !=
+		 (cr1 & SSCR1_CHANGE_MASK));
+	if (unlikely(truth)) {
+		/* stop the SSP, and update the other bits */
+		iowrite32(cr0 & ~SSCR0_SSE, reg + SSCR0);
+		/* first set CR1 without interrupt and service enables */
+		iowrite32(cr1 & SSCR1_CHANGE_MASK, reg + SSCR1);
+		/* restart the SSP */
+		iowrite32(cr0, reg + SSCR0);
+	}
+
+	/* after chip select, release the data by enabling service
+	 * requests and interrupts, without changing any mode bits */
+	iowrite32(cr1, reg + SSCR1);
+
+	if (likely(drv_data->dma_mapped)) {
+		/* transfer using DMA */
+		dma_transfer(drv_data);
+	} else if (chip->poll_mode) {
+		/* transfer using non interrupt polling */
+		tasklet_schedule(&drv_data->poll_transfer);
+	}
+	/*
+	 * if not using dma or poll-mode, transfers are done
+	 * using interrupt driven programmed I/O
+	 */
+
+	return 0;
+}
+
+/**
+ * setup()	- configures hardware according to given @spi device
+ * @spi:	SPI slave device being configured
+ *
+ * Allocates (on first call) the per-chip runtime data, selects the
+ * reader/writer helpers for the word size, optionally initializes DMA,
+ * and precomputes the SSCR0/SSCR1 values used by transfer().
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	uint tx_thres = TX_THRESH_DFLT;
+	uint rx_thres = RX_THRESH_DFLT;
+	u32 clk_div;
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+
+	if (drv_data->pwrstate == PWRSTATE_OFF) {
+		dev_dbg(&drv_data->pdev->dev, "setup: busy, pwrstate:%d",
+			drv_data->pwrstate);
+		return -EIO;
+	}
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	/* The Bulverde SSP supports 4..32 bit words */
+	if ((spi->bits_per_word < 4 || spi->bits_per_word > 32))
+		return -EINVAL;
+
+	/* Only alloc on first setup */
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+				"failed setup: can't allocate chip data");
+			return -ENOMEM;
+		}
+
+		chip->timeout = TIMOUT_DFLT;
+	}
+
+	/*
+	 *  protocol drivers may change the chip settings, so...
+	 * if chip_info exists, use it
+	 */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	if (chip_info) {
+		if (chip_info->timeout)
+			chip->timeout = chip_info->timeout;
+
+		if (chip_info->tx_threshold)
+			tx_thres = chip_info->tx_threshold;
+		if (chip_info->rx_threshold)
+			rx_thres = chip_info->rx_threshold;
+	}
+	/* I/O mode (DMA/poll/loopback) comes from the TESTMODE switches */
+	chip->enable_dma = TESTMODE(TESTMODE_ENABLE_DMA);
+	chip->poll_mode = TESTMODE(TESTMODE_ENABLE_POLL);
+	chip->enable_loopback = TESTMODE(TESTMODE_ENABLE_LOOPBACK);
+
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		/* unreachable: range-checked above */
+		dev_err(&spi->dev, "invalid wordsize");
+		return -ENODEV;
+	}
+
+	if (chip->enable_dma) {
+		intel_mid_ssp_spi_dma_init(drv_data);
+		set_dma_width(drv_data, spi->bits_per_word);
+	}
+
+	chip->speed_hz = spi->max_speed_hz;
+	clk_div = ssp_get_clk_div(chip->speed_hz);
+
+	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
+		(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+	chip->bits_per_word = spi->bits_per_word;
+
+	/* Precompute the control register images used by transfer() */
+	chip->cr0 = PNWL_CR0(clk_div, spi->bits_per_word, spi, chip);
+	chip->cr1 = PNWL_CR1(spi, chip);
+
+	dev_dbg(&spi->dev,
+		"KHz:%d bpw:%d mode:%d dma:%d poll:%d loop:%d cr0:%x cr1:%x",
+		100000 / clk_div, spi->bits_per_word, spi->mode & 0x3,
+		chip->enable_dma, chip->poll_mode, chip->enable_loopback,
+		chip->cr0, chip->cr1);
+
+	spi_set_ctldata(spi, chip);
+
+	return 0;
+}
+
+/**
+ * cleanup()	- cleans up master controller specific state
+ * @spi:	SPI device to cleanup
+ *
+ * This function releases master controller specific state for given @spi
+ * device.
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	/* Tear down DMA channels if setup() initialized them */
+	if (drv_data->dma_inited)
+		intel_mid_ssp_spi_dma_exit(drv_data);
+
+	kfree(chip);
+	spi_set_ctldata(spi, NULL);
+}
+
+/*
+ * PCI probe: verify the SSP is configured as SPI master, map its MMIO
+ * window, hook up the interrupt, route its DMA requests, program a
+ * default configuration and register the spi_master.
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct driver_data *drv_data;
+	int status = 0;
+	int pci_bar = 0;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+	u8 ssp_cfg;
+	int pos;
+
+	/* Check if the SSP we are probed for has been allocated to  */
+	/* operate as SPI master. This information is get from the   */
+	/* field adid of the Vendor-Specific PCI capability which is */
+	/* used as a configuration register.                         */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability");
+		goto err_abort_probe;
+	}
+	if ((SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) ||
+		SSP_CFG_IS_SPI_SLAVE(ssp_cfg)) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)",
+			ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(&pdev->dev, "found PCI SSP controller(ID: %04xh:%04xh"
+		" cfg: %02xh)", pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate Slave with space for drv_data and null dma buffer */
+	master = spi_alloc_master(dev, sizeof(struct driver_data));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_master");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	drv_data = spi_master_get_devdata(master);
+	drv_data->master = master;
+	drv_data->pdev = pdev;
+	drv_data->pwrstate = PWRSTATE_ON;
+	/* Workqueue used to resubmit messages after runtime resume;
+	 * previously the create_workqueue() result went unchecked. */
+	drv_data->wq = create_workqueue(DRIVER_NAME);
+	if (!drv_data->wq) {
+		dev_err(dev, "cannot create workqueue");
+		status = -ENOMEM;
+		goto err_free_1;
+	}
+	INIT_WORK(&drv_data->resume_transfer_work, resume_transfer_work);
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
+	master->num_chipselect = 1;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+
+	/* get basic io resource and map it */
+	drv_data->paddr = pci_resource_start(pdev, pci_bar);
+	drv_data->iolen = pci_resource_len(pdev, pci_bar);
+
+	status = pci_request_region(pdev, pci_bar, dev_name(dev));
+	if (status)
+		goto err_free_wq;
+
+	drv_data->ioaddr = ioremap_nocache(drv_data->paddr, drv_data->iolen);
+	if (!drv_data->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", drv_data->paddr);
+	dev_dbg(dev, "ioaddr = : %p", drv_data->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x", pdev->irq);
+
+	/* Attach to IRQ */
+	drv_data->irq = pdev->irq;
+
+	status = request_irq(drv_data->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", drv_data);
+	if (status < 0) {
+		dev_err(dev, "can not get IRQ %d", drv_data->irq);
+		goto err_free_3;
+	}
+
+	/* get base address of DMA selector. */
+	syscfg = drv_data->paddr - SYSCFG;
+	syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+	if (!syscfg_ioaddr) {
+		status = -ENOMEM;
+		/* was err_free_3, which leaked the IRQ just requested */
+		goto err_free_4;
+	}
+
+	iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	/* one-shot configuration write: the mapping is no longer needed */
+	iounmap(syscfg_ioaddr);
+
+	drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+	drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL;
+	drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+	drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+
+	tasklet_init(&drv_data->poll_transfer,
+		     poll_transfer, (unsigned long)drv_data);
+
+	/* Load default SSP configuration */
+	dev_info(dev, "setup default SSP configuration");
+	iowrite32(0, drv_data->ioaddr + SSCR0);
+	iowrite32(SSCR1_RxTresh(RX_THRESH_DFLT) |
+		    SSCR1_TxTresh(TX_THRESH_DFLT),
+		    drv_data->ioaddr + SSCR1);
+	iowrite32(SSCR0_Motorola | SSCR0_DataSize(8), drv_data->ioaddr + SSCR0);
+	iowrite32(0, drv_data->ioaddr + SSTO);
+	iowrite32(PNWL_SSPSP, drv_data->ioaddr + SSPSP);
+
+	/* Register with the SPI framework */
+	dev_info(&pdev->dev, "register with SPI framework (as SPI%d)",
+		 master->bus_num);
+	status = spi_register_master(master);
+	if (status != 0) {
+		dev_err(dev, "problem registering driver");
+		goto err_free_4;
+	}
+
+	pci_set_drvdata(pdev, drv_data);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_idle(&pdev->dev);
+	pm_runtime_allow(dev);
+
+	return status;
+
+err_free_4:
+	free_irq(drv_data->irq, drv_data);
+err_free_3:
+	iounmap(drv_data->ioaddr);
+err_free_2:
+	pci_release_region(pdev, pci_bar);
+err_free_wq:
+	destroy_workqueue(drv_data->wq);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/*
+ * PCI remove: quiesce and release everything acquired in probe().
+ * Fixes resource leaks in the original: the poll tasklet and the
+ * resume workqueue were never torn down.
+ */
+static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	if (!drv_data)
+		return;
+
+	pci_set_drvdata(pdev, NULL);
+
+	free_irq(drv_data->irq, drv_data);
+
+	/* Make sure no deferred work or poll tasklet is still running */
+	tasklet_kill(&drv_data->poll_transfer);
+	destroy_workqueue(drv_data->wq);
+
+	iounmap(drv_data->ioaddr);
+
+	pci_release_region(pdev, 0);
+
+	spi_unregister_master(drv_data->master);
+
+	pci_disable_device(pdev);
+
+	return;
+}
+
+/*
+ * Common suspend helper for system and runtime PM.
+ * for now IDLE and OFF states are treated the same
+ *
+ * Refuses to suspend (-EBUSY) while the FIFOs still hold data.
+ */
+static int _pm_suspend(struct pci_dev *pdev, int to)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+	void *reg = drv_data->ioaddr;
+	int from = drv_data->pwrstate;
+	u32 sssr;
+
+
+	if (to != PWRSTATE_IDLE && to != PWRSTATE_OFF) {
+		dev_err(&pdev->dev, "ERROR: suspend: invalid dst pwrstate %x",
+			to);
+		return -EINVAL;
+	}
+
+	switch (from) {
+	case PWRSTATE_ON:
+		dev_dbg(&pdev->dev, "suspend: turn off SSP");
+		if (have_fifo_data(drv_data, &sssr)) {
+			dev_err(&pdev->dev,
+				"ERROR: suspend: i/o present! sssr:%x", sssr);
+			return -EBUSY;
+		}
+		tasklet_disable(&drv_data->poll_transfer);
+		drv_data->pwrstate = to;
+		/* Disable the port (SSCR0_SSE cleared with everything else) */
+		iowrite32(0, reg + SSCR0);
+		dev_dbg(&pdev->dev, "suspend: cr0:%x cr1:%x sssr:%x",
+			ioread32(reg + SSCR0), ioread32(reg + SSCR1),
+			ioread32(reg + SSSR));
+		break;
+	case PWRSTATE_IDLE:
+	case PWRSTATE_OFF:
+		/* Already asleep: only record the new target state */
+		drv_data->pwrstate = to;
+		break;
+	default:
+		dev_err(&pdev->dev, "ERROR: suspend: invalid src pwrstate %x",
+			from);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Common resume helper for system and runtime PM.
+ * for now IDLE and OFF states are treated the same
+ */
+static void _pm_resume(struct pci_dev *pdev)
+{
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+	void *reg = drv_data->ioaddr;
+
+	switch (drv_data->pwrstate) {
+	default:
+		dev_err(&pdev->dev, "ERROR: resume: invalid src pwrstate %x",
+			drv_data->pwrstate);
+		/* fall through ... */
+	case PWRSTATE_IDLE:
+	case PWRSTATE_OFF:
+		dev_dbg(&pdev->dev, "resume: turn on SSP");
+
+		/*
+		 * we don't bother reconfiguring the registers
+		 * on resume - that will get done when transfer()
+		 * is called
+		 */
+		tasklet_enable(&drv_data->poll_transfer);
+		dev_dbg(&pdev->dev, "resume: cr0:%x cr1:%x sssr:%x",
+			ioread32(reg + SSCR0), ioread32(reg + SSCR1),
+			ioread32(reg + SSSR));
+
+		drv_data->pwrstate = PWRSTATE_ON;
+		break;
+	case PWRSTATE_ON:
+		/* Nothing to do, already running */
+		break;
+	}
+}
+
+
+#ifdef CONFIG_PM
+
+/* System (S3) suspend: quiesce the SSP, then put the PCI device into
+ * its sleep state and disable it. */
+static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_ON)
+		dev_warn(&pdev->dev, "suspend: !on, pwrstate:%d",
+			 drv_data->pwrstate);
+	retval = _pm_suspend(pdev, PWRSTATE_OFF);
+	if (retval)
+		return retval;
+	retval = pci_prepare_to_sleep(pdev);
+	if (retval) {
+		dev_err(&pdev->dev, "suspend: prepare to sleep failed");
+		return retval;
+	}
+	pci_disable_device(pdev);
+	return 0;
+}
+
+/* System (S3) resume: re-enable the PCI device, bring it back to D0
+ * and restore the driver power state. */
+static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
+{
+	int retval;
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(&pdev->dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_OFF)
+		dev_warn(&pdev->dev, "resume: !off, pwrstate:%d",
+			 drv_data->pwrstate);
+	retval = pci_enable_device(pdev);
+	if (retval) {
+		/* was a copy-paste of the message below */
+		dev_err(&pdev->dev, "resume: enable device failed");
+		return retval;
+	}
+	retval = pci_back_from_sleep(pdev);
+	if (retval) {
+		dev_err(&pdev->dev, "resume: back from sleep failed");
+		return retval;
+	}
+	_pm_resume(pdev);
+	return 0;
+}
+
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#endif /* CONFIG_PM */
+
+
+/* Runtime-PM resume: restore PCI state and power, then bring the
+ * driver state machine back to PWRSTATE_ON. */
+static int intel_mid_ssp_spi_pm_runtime_resume(struct device *dev)
+{
+	int retval;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s called", __func__);
+	if (drv_data->pwrstate == PWRSTATE_ON)
+		return 0;
+	if (drv_data->pwrstate != PWRSTATE_IDLE)
+		dev_warn(&pdev->dev, "rt resume: !idle, pwrstate:%d",
+			 drv_data->pwrstate);
+	/* Undo what the runtime-suspend handler did, in reverse order */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	retval = pci_enable_device(pdev);
+	if (retval)
+		return retval;
+	_pm_resume(pdev);
+
+	return retval;
+}
+
+/* Runtime-PM suspend: quiesce the SSP (to PWRSTATE_IDLE), save PCI
+ * state and drop the device to D3hot. */
+static int intel_mid_ssp_spi_pm_runtime_suspend(struct device *dev)
+{
+	int retval;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct driver_data *drv_data = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s called", __func__);
+	if (drv_data->pwrstate != PWRSTATE_ON)
+		dev_warn(&pdev->dev, "rt suspend: !on, pwrstate:%d",
+			 drv_data->pwrstate);
+	retval = _pm_suspend(pdev, PWRSTATE_IDLE);
+	if (retval)
+		return retval;
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return retval;
+}
+
+/* Runtime-PM idle callback: schedule the real suspend 100ms out and
+ * return -EBUSY so the PM core does not suspend immediately. */
+static int intel_mid_ssp_spi_pm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s called", __func__);
+
+	pm_schedule_suspend(dev, 100);
+
+	return -EBUSY;
+}
+
+/* Runtime-PM operations (system sleep uses the legacy PCI hooks) */
+static const struct dev_pm_ops intel_mid_ssp_spi_pm = {
+	.runtime_suspend	= intel_mid_ssp_spi_pm_runtime_suspend,
+	.runtime_resume		= intel_mid_ssp_spi_pm_runtime_resume,
+	.runtime_idle		= intel_mid_ssp_spi_pm_runtime_idle,
+};
+
+/* Medfield SSP3 (SPI mode).  Const init data belongs in
+ * __devinitconst, not __devinitdata (section attribute mismatch). */
+static const struct pci_device_id pci_ids[] __devinitconst = {
+	{ PCI_VDEVICE(INTEL, 0x0816) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= pci_ids,
+	.probe		= intel_mid_ssp_spi_probe,
+	.remove		= __devexit_p(intel_mid_ssp_spi_remove),
+	.suspend	= intel_mid_ssp_spi_suspend,
+	.resume		= intel_mid_ssp_spi_resume,
+	.driver = {
+		.pm	= &intel_mid_ssp_spi_pm,
+	},
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+/* late_initcall: registered late — presumably so the Intel MID DMA
+ * controller driver is already bound; confirm against dmac1 probing */
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+module_exit(intel_mid_ssp_spi_exit);
diff --git a/drivers/spi/intel_mid_ssp_spi_def.h b/drivers/spi/intel_mid_ssp_spi_def.h
new file mode 100644
index 0000000..4610d62
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi_def.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (C) Intel 2010
+ *  Ken Mills <ken.k.mills-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_DEF_H_
+#define INTEL_MID_SSP_SPI_DEF_H_
+
+
+/*
+ * Penwell SSP register definitions
+ */
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)	/* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola	      (0x0 << 4)	 /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS	      (1 << 20)	/* Extended data size select */
+#define SSCR0_NCS   (1 << 21)		/* Network clock select */
+#define SSCR0_RIM    (1 << 22)		 /* Receive FIFO overrrun int mask */
+#define SSCR0_TUM   (1 << 23)		/* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)	    /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24)	/* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)		/* Audio clock select */
+#define SSCR0_MOD  (1 << 31)	       /* Mode (normal or network) */
+
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS	     (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF     (1 << 2) /* Transmit FIFO Not Full */
+#define SSSR_RNE     (1 << 3) /* Receive FIFO Not Empty */
+#define SSSR_BSY     (1 << 4) /* SSP Busy */
+#define SSSR_TFS     (1 << 5) /* Transmit FIFO Service Request */
+#define SSSR_RFS     (1 << 6) /* Receive FIFO Service Request */
+#define SSSR_ROR    (1 << 7) /* Receive FIFO Overrun */
+#define SSSR_TFL     (0x0f00) /* Transmit FIFO Level (mask) */
+#define SSSR_RFL     (0xf000) /* Receive FIFO Level (mask) */
+
+/* SSCR0_TIM is an alias of SSCR0_TUM above (bit 23), kept so callers
+ * may use either name.  The duplicate SSCR0_RIM/NCS/EDSS definitions
+ * were removed - identical macros already appear earlier in this file.
+ */
+#define SSCR0_TIM    (1 << 23)		 /* Transmit FIFO Under Run Int Mask */
+
+#define SSCR0_TISSP	       (1 << 4) /* TI Sync Serial Protocol */
+#define SSCR0_PSP   (3 << 4) /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP	       (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE    (1 << 30)		 /* TXD Tristate Enable */
+#define SSCR1_EBCEI	       (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR (1 << 28)	       /* Slave Clock free Running */
+#define SSCR1_ECRA (1 << 27)	       /* Enable Clock Request A */
+#define SSCR1_ECRB (1 << 26)	       /* Enable Clock request B */
+#define SSCR1_SCLKDIR	     (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR	    (1 << 24)		/* Frame Direction */
+#define SSCR1_RWOT	      (1 << 23)		  /* Receive Without Transmit */
+#define SSCR1_TRAIL (1 << 22)		/* Trailing Byte */
+#define SSCR1_TSRE (1 << 21)	       /* Transmit Service Request Enable */
+#define SSCR1_RSRE (1 << 20)	       /* Receive Service Request Enable */
+#define SSCR1_TINTE (1 << 19)		/* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE	       (1 << 18) /* Trailing Byte Interupt Enable */
+#define SSCR1_IFS		(1 << 16)	/* Invert Frame Signal */
+#define SSCR1_STRF (1 << 15)	       /* Select FIFO or EFWR */
+#define SSCR1_EFWR	      (1 << 14)		  /* Enable FIFO Write/Read */
+
+#define SSSR_BCE     (1 << 23)		 /* Bit Count Error */
+#define SSSR_CSS     (1 << 22)		 /* Clock Synchronisation Status */
+#define SSSR_TUR     (1 << 21)		 /* Transmit FIFO Under Run */
+#define SSSR_EOC    (1 << 20)		/* End Of Chain */
+#define SSSR_TINT     (1 << 19)		  /* Receiver Time-out Interrupt */
+#define SSSR_PINT    (1 << 18)	    /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT (1 << 25)	       /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23)	     /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x) ((x) << 16)	   /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)	      /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)	      /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)	       /* Start Delay */
+#define SSPSP_ETDS	      (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP	     (1 << 2) /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)	  ((x) << 0)	       /* Serial Bit Rate Clock Mode */
+
+#define SSCR0	0x00
+#define SSCR1	0x04
+#define SSSR	0x08
+#define SSITR	0x0c
+#define SSDR	0x10
+#define SSTO	0x28
+#define SSPSP	0x2c
+#define SYSCFG	0x20bc0
+
+/* SSP assignement configuration from PCI config */
+#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
+#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
+#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	u8 tx_threshold;	/* TX FIFO trigger threshold */
+	u8 rx_threshold;	/* RX FIFO trigger threshold */
+	u8 dma_burst_size;	/* DMA burst size */
+	u32 timeout;		/* transfer timeout (units: TODO confirm) */
+	u16 extra_data[5];	/* platform-specific extra settings */
+};
+
+
+#endif /* INTEL_MID_SSP_SPI_DEF_H_ */
-- 
1.7.2.3


------------------------------------------------------------------------------
Special Offer-- Download ArcSight Logger for FREE (a $49 USD value)!
Finally, a world-class log management solution at an even better price-free!
Download using promo code Free_Logger_4_Dev2Dev. Offer expires 
February 28th, so secure your free ArcSight Logger TODAY! 
http://p.sf.net/sfu/arcsight-sfd2d

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-02 21:01 ` Russ Gorby
@ 2011-02-02 21:03   ` Mark Brown
  2011-02-02 22:26   ` Alan Cox
  2011-02-02 22:40   ` Alan Cox
  2 siblings, 0 replies; 13+ messages in thread
From: Mark Brown @ 2011-02-02 21:03 UTC (permalink / raw)
  To: Russ Gorby
  Cc: David Brownell, Grant Likely, open list:SPI SUBSYSTEM, open list

On Wed, Feb 02, 2011 at 01:01:52PM -0800, Russ Gorby wrote:
> SPI master controller driver for the Intel MID platform Medfield
> This driver uses the Penwell SSP controller and configures it to
> be a SPI device (spibus 3). This bus supports a single device -
> the 3G SPI modem that can operate up to 25Mhz.

The same hardware is also used for audio I believe - how do the two
drivers share the hardware?

> +#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
> +#define SSCR0_DataSize(x)  ((x) - 1)	/* Data Size Select [4..16] */
> +#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
> +#define SSCR0_Motorola	      (0x0 << 4)	 /* Motorola's SPI mode */
> +#define SSCR0_ECS   (1 << 6) /* External clock select */
> +#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */

There certainly looks to be overlap with the register definitions.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-02 21:01 ` Russ Gorby
  2011-02-02 21:03   ` Mark Brown
@ 2011-02-02 22:26   ` Alan Cox
  2011-02-02 22:40   ` Alan Cox
  2 siblings, 0 replies; 13+ messages in thread
From: Alan Cox @ 2011-02-02 22:26 UTC (permalink / raw)
  To: Russ Gorby
  Cc: David Brownell, Grant Likely, open list:SPI SUBSYSTEM, open list

On Wed,  2 Feb 2011 13:01:52 -0800
Russ Gorby <russ.gorby@intel.com> wrote:

> SPI master controller driver for the Intel MID platform Medfield
> This driver uses the Penwell SSP controller and configures it to
> be a SPI device (spibus 3). This bus supports a single device -
> the 3G SPI modem that can operate up to 25Mhz.

NAK this.

We have an existing development driver that covers 0x0815, 0x0816, 0x0825,
0x0832 in a single driver which needs tidying up and double checking on
all the relevant Medfield and Moorestown devices and is based on work done
by Mathieu Soulard.

All these devices can be handled by a single driver, and should be.

Alan

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-02 21:01 ` Russ Gorby
  2011-02-02 21:03   ` Mark Brown
  2011-02-02 22:26   ` Alan Cox
@ 2011-02-02 22:40   ` Alan Cox
  2011-02-03 13:28     ` Mark Brown
  2011-02-14 19:09     ` Grant Likely
  2 siblings, 2 replies; 13+ messages in thread
From: Alan Cox @ 2011-02-02 22:40 UTC (permalink / raw)
  To: Russ Gorby
  Cc: David Brownell, Grant Likely, open list:SPI SUBSYSTEM, open list

On Wed,  2 Feb 2011 13:01:52 -0800
Russ Gorby <russ.gorby@intel.com> wrote:

> SPI master controller driver for the Intel MID platform Medfield
> This driver uses the Penwell SSP controller and configures it to
> be a SPI device (spibus 3). This bus supports a single device -
> the 3G SPI modem that can operate up to 25Mhz.

And this is the unified one that handles all the devices, but I gather
may need some fixing/test work on Medfield.

(and the only reason you haven't seen this submitted yet is I was
 asked to wait until it had been tested on those platforms. So I'm
 very surprised to see the other submission)

Alan

--

From: Mathieu SOULARD <mathieux.soulard@intel.com>

intel_mid_ssp_spi:  Moorestown and Medfield SPI for SSP devices
    
This driver is a fusion of various internal drivers into a single
driver for the SPI slave/master on the Intel Moorestown and Medfield
SSP devices.
    
Signed-off-by: Mathieu SOULARD <mathieux.soulard@intel.com>
[Ported to the -next tree DMA engine]
Signed-off-by: Alan Cox <alan@linux.intel.com>

diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index bb233a9..6d1a41f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -178,6 +178,14 @@ config SPI_IMX
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
+	depends on SPI_MASTER && INTEL_MID_DMAC && EXPERIMENTAL
+	help
+	  This is the unified SSP SPI slave controller driver for the Intel
+	  MID platforms, handling Moorestown & Medfield, master & slave
+	  clock mode.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 86d1b5f..6e052b5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SPI_DW_MMIO)		+= dw_spi_mmio.o
 obj-$(CONFIG_SPI_EP93XX)		+= ep93xx_spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi_gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= intel_mid_ssp_spi.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)		+= pxa2xx_spi.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= pxa2xx_spi_pci.o
diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
new file mode 100644
index 0000000..2d6d881
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.c
@@ -0,0 +1,1403 @@
+/*
+ * intel_mid_ssp_spi.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos_params.h>
+
+#include <linux/spi/spi.h>
+#include "intel_mid_ssp_spi.h"
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+/* Fixed typo in the user-visible module description: "contoller". */
+MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
+MODULE_LICENSE("GPL");
+
+static const struct pci_device_id pci_ids[];
+
+#ifdef DUMP_RX
+/*
+ * dump_trailer() - Hex-dump the first and last @sz bytes of @buf.
+ * @dev:	device used for the log line
+ * @buf:	buffer to dump
+ * @len:	total buffer length
+ * @sz:		number of bytes to show at each end
+ *
+ * The original code appended with sprintf(msg, "%s...", msg), i.e.
+ * overlapping source and destination, which is undefined behavior
+ * (C11 7.21.6.6).  Rewritten to append via a running offset with
+ * snprintf, which also bounds writes to the msg buffer.
+ */
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+	int tlen1 = (len < sz ? len : sz);
+	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+	unsigned char *p;
+	int off = 0;
+	static char msg[MAX_SPI_TRANSFER_SIZE];
+
+	memset(msg, '\0', sizeof(msg));
+	p = (unsigned char *)buf;
+	while (p < (unsigned char *)buf + tlen1)
+		off += snprintf(msg + off, sizeof(msg) - off, "%02x",
+				(unsigned int)*p++);
+
+	if (tlen2 > 0) {
+		off += snprintf(msg + off, sizeof(msg) - off, " .....");
+		p = (unsigned char *)(buf + len) - tlen2;
+		while (p < (unsigned char *)buf + len)
+			off += snprintf(msg + off, sizeof(msg) - off, "%02x",
+					(unsigned int)*p++);
+	}
+
+	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+		   len-tlen2, len - 1, msg);
+}
+#endif
+
+/* True when the TX FIFO is empty: level bits (TFL) all zero and the
+ * not-full flag (TNF) set. */
+static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	u32 status = read_SSSR(drv_context->ioaddr);
+
+	return !(status & SSSR_TFL_MASK) && (status & SSSR_TNF);
+}
+
+/* True when the receive FIFO has no data pending (SSSR.RNE clear). */
+static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
+{
+	return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
+}
+
+/* Disable the SSP port itself by clearing SSCR0.SSE. */
+static inline void disable_interface(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+/* Mask the DMA/CPU service-request triggers recorded in cr1_sig. */
+static inline void disable_triggers(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
+}
+
+
+/*
+ * flush() - Drain any stale data from the SSP FIFOs.
+ * @drv_context:	Pointer to the private driver context
+ *
+ * If the TX FIFO is not empty the port is reset (SSE cleared);
+ * otherwise the RX FIFO is read out, bounded to one FIFO depth plus
+ * one word.  Fixes: removed redundant trailing return, corrected
+ * "occured" spelling in the WARN message.
+ */
+static void flush(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u32 i = 0;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (!is_tx_fifo_empty(drv_context)) {
+		dev_err(&drv_context->pdev->dev,
+				"TX FIFO not empty. Reset of SPI IF");
+		disable_interface(drv_context);
+		return;
+	}
+
+	dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
+	while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
+		read_SSDR(reg);
+		i++;
+	}
+	WARN(i > 0, "%d words flush occurred\n", i);
+}
+
+/* Push one zero word into the TX FIFO (used when there is no user TX
+ * buffer); returns 1 on progress, 0 when the FIFO is full or the
+ * transfer byte count has been reached. */
+static int null_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(0, reg);
+	drv_context->tx += n_bytes;
+
+	return 1;
+}
+
+/* Drain and discard available RX words (used when there is no user RX
+ * buffer); returns non-zero once the expected count is consumed. */
+static int null_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	u8 n_bytes = drv_context->n_bytes;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		read_SSDR(reg);
+		drv_context->rx += n_bytes;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+/*
+ * Polled FIFO accessors for 1-, 2- and 4-byte word sizes.  Writers push
+ * one word per call and return 1 on progress, 0 when the TX FIFO is
+ * full or the transfer is complete; readers drain all available words
+ * and return non-zero once rx has reached rx_end.
+ */
+
+/* Write one byte to the TX FIFO. */
+static int u8_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u8 *)(drv_context->tx), reg);
+	++drv_context->tx;
+
+	return 1;
+}
+
+/* Read available bytes from the RX FIFO. */
+static int u8_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u8 *)(drv_context->rx) = read_SSDR(reg);
+		++drv_context->rx;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+/* Write one 16-bit word to the TX FIFO. */
+static int u16_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u16 *)(drv_context->tx), reg);
+	drv_context->tx += 2;
+
+	return 1;
+}
+
+/* Read available 16-bit words from the RX FIFO. */
+static int u16_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u16 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 2;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+/* Write one 32-bit word to the TX FIFO. */
+static int u32_writer(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (drv_context->tx == drv_context->tx_end))
+		return 0;
+
+	write_SSDR(*(u32 *)(drv_context->tx), reg);
+	drv_context->tx += 4;
+
+	return 1;
+}
+
+/* Read available 32-bit words from the RX FIFO. */
+static int u32_reader(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (drv_context->rx < drv_context->rx_end)) {
+		*(u32 *)(drv_context->rx) = read_SSDR(reg);
+		drv_context->rx += 4;
+	}
+
+	return drv_context->rx == drv_context->rx_end;
+}
+
+/* dma_request_channel() filter: accept only channels hosted by the
+ * DMAC1 PCI device recorded in the driver context. */
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct ssp_driver_context *drv_context = param;
+
+	if (!drv_context->dmac1)
+		return false;
+
+	return chan->device->dev == &drv_context->dmac1->dev;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @drv_context:	Pointer to the private driver context
+ *
+ * No-op when no buffers are currently mapped.
+ */
+static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (!drv_context->dma_mapped)
+		return;
+	/* dma_unmap_single() takes enum dma_data_direction values, not
+	 * the legacy PCI_DMA_* flags. */
+	dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
+		DMA_FROM_DEVICE);
+	dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
+		DMA_TO_DEVICE);
+	drv_context->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:	Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+	struct callback_param *cb_param = (struct callback_param *)arg;
+	struct ssp_driver_context *drv_context = cb_param->drv_context;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (cb_param->direction == TX_DIRECTION)
+		drv_context->txdma_done = 1;
+	else
+		drv_context->rxdma_done = 1;
+
+	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+		cb_param->direction, drv_context->rxdma_done,
+		drv_context->txdma_done);
+
+	if (drv_context->txdma_done && drv_context->rxdma_done) {
+		/* Clear Status Register */
+		write_SSSR(drv_context->clear_sr, reg);
+		dev_dbg(dev, "DMA done\n");
+		/* Disable Triggers to DMA or to CPU*/
+		disable_triggers(drv_context);
+		unmap_dma_buffers(drv_context);
+
+		/* Defer completion to process context via the workqueue. */
+		queue_work(drv_context->dma_wq, &drv_context->complete_work);
+	}
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	struct dma_slave_config *ds;
+	dma_cap_mask_t mask;
+	struct device *dev = &drv_context->pdev->dev;
+	unsigned int device_id;
+
+	/* Configure RX channel parameters */
+	rxs = &drv_context->dmas_rx;
+	ds = &rxs->dma_slave;
+
+	ds->direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->src_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Configure TX channel parameters */
+	txs = &drv_context->dmas_tx;
+	ds = &txs->dma_slave;
+
+	ds->direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	ds->dst_addr_width = drv_context->n_bytes;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	/* NOTE(review): this branch reads rx_fifo_threshold while setting
+	 * up the TX channel - looks copy/pasted from the RX block above;
+	 * confirm whether tx_fifo_threshold was intended. */
+	if (drv_context->rx_fifo_threshold == 8) {
+		ds->src_maxburst = 8;
+		ds->dst_maxburst = 8;
+	} else if (drv_context->rx_fifo_threshold == 4) {
+		ds->src_maxburst = 4;
+		ds->dst_maxburst = 4;
+	} else {
+		ds->src_maxburst = 1;
+		ds->dst_maxburst = 1;
+	}
+
+	/* Nothing more to do if already initialized */
+	if (drv_context->dma_initialized)
+		return;
+
+	/* Use DMAC1 */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
+		device_id = PCI_MRST_DMAC1_ID;
+	else
+		device_id = PCI_MDFL_DMAC1_ID;
+
+	drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
+							device_id, NULL);
+
+	if (!drv_context->dmac1) {
+		dev_err(dev, "Can't find DMAC1");
+		return;
+	}
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+		/* Map one region covering both RX and TX bounce buffers. */
+		drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+				2 * MAX_SPI_TRANSFER_SIZE);
+		if (drv_context->virt_addr_sram_rx)
+			drv_context->virt_addr_sram_tx =
+				drv_context->virt_addr_sram_rx +
+				MAX_SPI_TRANSFER_SIZE;
+		else
+			dev_err(dev, "Virt_addr_sram_rx is null\n");
+	}
+
+	/* 1. Allocate rx channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	drv_context->rxchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+	if (!drv_context->rxchan)
+		goto err_exit;
+
+	drv_context->rxchan->private = rxs;
+
+	/* 2. Allocate tx channel */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	drv_context->txchan = dma_request_channel(mask, chan_filter,
+		drv_context);
+
+	if (!drv_context->txchan)
+		goto free_rxchan;
+	else
+		drv_context->txchan->private = txs;
+
+	/* set the dma done bit to 1 */
+	drv_context->txdma_done = 1;
+	drv_context->rxdma_done = 1;
+
+	drv_context->tx_param.drv_context  = drv_context;
+	drv_context->tx_param.direction = TX_DIRECTION;
+	drv_context->rx_param.drv_context  = drv_context;
+	drv_context->rx_param.direction = RX_DIRECTION;
+
+	drv_context->dma_initialized = 1;
+
+	return;
+
+free_rxchan:
+	dma_release_channel(drv_context->rxchan);
+err_exit:
+	dev_err(dev, "Error : DMA Channel Not available\n");
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+	return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @drv_context:	Pointer to the private driver context
+ *
+ * Mirrors intel_mid_ssp_spi_dma_init(): releases both channels, the
+ * optional SRAM mapping and the DMAC1 device reference.
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
+{
+	dma_release_channel(drv_context->txchan);
+	dma_release_channel(drv_context->rxchan);
+
+	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(drv_context->virt_addr_sram_rx);
+
+	pci_dev_put(drv_context->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @drv_context:	Pointer to the private driver context
+ *
+ * Prepares and submits memcpy descriptors for the RX and TX channels;
+ * completion is reported through intel_mid_ssp_spi_dma_done().
+ */
+static void dma_transfer(struct ssp_driver_context *drv_context)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &drv_context->pdev->dev;
+
+	/* get Data Read/Write address */
+	/* paddr + 0x10 is the SSDR (data) register offset. */
+	ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
+
+	if (drv_context->tx_dma)
+		drv_context->txdma_done = 0;
+
+	if (drv_context->rx_dma)
+		drv_context->rxdma_done = 0;
+
+	/* 2. prepare the RX dma transfer */
+	txchan = drv_context->txchan;
+	rxchan = drv_context->rxchan;
+
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* Since the DMA is configured to do 32bits access */
+		/* to/from the DDR, the DMA transfer size must be  */
+		/* a multiple of 4 bytes                           */
+		drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
+		drv_context->len_dma_tx = drv_context->len_dma_rx;
+
+		/* In Rx direction, TRAIL Bytes are handled by memcpy */
+		if (drv_context->rx_dma &&
+			(drv_context->len_dma_rx >
+			drv_context->rx_fifo_threshold * drv_context->n_bytes))
+			drv_context->len_dma_rx =
+					TRUNCATE(drv_context->len_dma_rx,
+					drv_context->rx_fifo_threshold *
+					drv_context->n_bytes);
+		else if (!drv_context->rx_dma)
+			dev_err(dev, "ERROR : rx_dma is null\r\n");
+	} else {
+		/* TRAIL Bytes are handled by DMA */
+		if (drv_context->rx_dma) {
+			drv_context->len_dma_rx = drv_context->len;
+			drv_context->len_dma_tx = drv_context->len;
+		} else {
+			dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
+		}
+	}
+
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,				/* DMA Channel */
+		drv_context->rx_dma,			/* DAR */
+		ssdr_addr,				/* SAR */
+		drv_context->len_dma_rx,		/* Data Length */
+		flag);					/* Flag */
+
+	if (rxdesc) {
+		rxdesc->callback = intel_mid_ssp_spi_dma_done;
+		rxdesc->callback_param = &drv_context->rx_param;
+	} else {
+		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%d)\n",
+			drv_context->len_dma_rx);
+		drv_context->rxdma_done = 1;
+	}
+
+	/* 3. prepare the TX dma transfer */
+	if (drv_context->tx_dma) {
+		txdesc = txchan->device->device_prep_dma_memcpy
+		(txchan,				/* DMA Channel */
+		ssdr_addr,				/* DAR */
+		drv_context->tx_dma,			/* SAR */
+		drv_context->len_dma_tx,		/* Data Length */
+		flag);					/* Flag */
+		if (txdesc) {
+			txdesc->callback = intel_mid_ssp_spi_dma_done;
+			txdesc->callback_param = &drv_context->tx_param;
+		} else {
+			dev_dbg(dev, "txdesc is null! (len_dma_tx:%d)\n",
+				drv_context->len_dma_tx);
+			drv_context->txdma_done = 1;
+		}
+	} else {
+		dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
+		return;
+	}
+
+	/* NOTE(review): dev_info on every transfer is chatty; consider
+	 * dev_dbg. */
+	dev_info(dev, "DMA transfer len:%d len_dma_tx:%d len_dma_rx:%d\n",
+		drv_context->len, drv_context->len_dma_tx,
+		drv_context->len_dma_rx);
+
+	if (rxdesc || txdesc) {
+		if (rxdesc) {
+			dev_dbg(dev, "Firing DMA RX channel\n");
+			rxdesc->tx_submit(rxdesc);
+		}
+		if (txdesc) {
+			dev_dbg(dev, "Firing DMA TX channel\n");
+			txdesc->tx_submit(txdesc);
+		}
+	} else {
+		/* Neither descriptor could be prepared: fake a completion
+		 * so the transfer is finished and buffers unmapped. */
+		struct callback_param cb_param;
+		cb_param.drv_context = drv_context;
+		dev_dbg(dev, "Bypassing DMA transfer\n");
+		intel_mid_ssp_spi_dma_done(&cb_param);
+	}
+}
+
+/**
+ * map_dma_buffers() - Map DMA buffers before a transfer
+ * @drv_context:	Pointer to the private driver context
+ *
+ * Returns 1 on success, 0 when the buffers are already mapped or a
+ * mapping fails.  Fixes: use enum dma_data_direction values with
+ * dma_map_single() (matching the unmap path) instead of the legacy
+ * PCI_DMA_* flags, and drop the unreachable duplicate dma_mapped
+ * re-check in the non-SRAM branch.
+ */
+static int map_dma_buffers(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+
+	if (unlikely(drv_context->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped\n");
+		return 0;
+	}
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+		/* Copy drv_context->tx into sram_tx */
+		memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
+			drv_context->len);
+#ifdef DUMP_RX
+		dump_trailer(&drv_context->pdev->dev, drv_context->tx,
+			drv_context->len, 16);
+#endif
+		drv_context->rx_dma = SRAM_RX_ADDR;
+		drv_context->tx_dma = SRAM_TX_ADDR;
+	} else {
+		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
+		drv_context->tx_dma =
+			dma_map_single(dev, drv_context->tx, drv_context->len,
+				DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
+			dev_err(dev, "ERROR : tx dma mapping failed\n");
+			return 0;
+		}
+
+		drv_context->rx_dma =
+			dma_map_single(dev, drv_context->rx, drv_context->len,
+				DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
+			dma_unmap_single(dev, drv_context->tx_dma,
+				drv_context->len, DMA_TO_DEVICE);
+			dev_err(dev, "ERROR : rx dma mapping failed\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @drv_context:	Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ *
+ * NOTE(review): not declared static - make it static unless it is
+ * referenced from another file.
+ */
+void drain_trail(struct ssp_driver_context *drv_context)
+{
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+
+	if (drv_context->len != drv_context->len_dma_rx) {
+		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+			read_SSSR(reg));
+		/* Skip past the part the DMA already moved. */
+		drv_context->rx += drv_context->len_dma_rx;
+		drv_context->tx += drv_context->len_dma_tx;
+
+		/* Poll the remaining words through the FIFOs. */
+		while ((drv_context->tx != drv_context->tx_end) ||
+			(drv_context->rx != drv_context->rx_end)) {
+			drv_context->read(drv_context);
+			drv_context->write(drv_context);
+		}
+	}
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy received data from Langwell SRAM to DDR
+ * @drv_context:	Pointer to the private driver context
+ *
+ * When trailing bytes are handled outside the DMA, only the truncated
+ * DMA-covered length is copied from the SRAM bounce buffer.
+ */
+static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
+{
+	u32 length = drv_context->len;
+
+	if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+		&& (drv_context->len > drv_context->rx_fifo_threshold *
+		drv_context->n_bytes))
+		length = TRUNCATE(drv_context->len,
+			drv_context->rx_fifo_threshold * drv_context->n_bytes);
+
+	memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
+}
+
+/* Finish a DMA-driven transfer: copy back from SRAM if needed, drain
+ * trailing bytes, update the message status/length and invoke the
+ * caller's completion callback.  Runs from the completion workqueue. */
+static void int_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	void *reg = drv_context->ioaddr;
+	struct spi_message *msg;
+	struct device *dev = &drv_context->pdev->dev;
+
+	/* Release the PM QoS constraint taken for the transfer. */
+	if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+		pm_qos_update_request(&drv_context->pm_qos_req,
+					PM_QOS_DEFAULT_VALUE);
+
+	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+		sram_to_ddr_cpy(drv_context);
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+		drain_trail(drv_context);
+	else
+		/* Stop getting Time Outs */
+		write_SSTO(0, reg);
+
+	drv_context->cur_msg->status = 0;
+	drv_context->cur_msg->actual_length = drv_context->len;
+
+#ifdef DUMP_RX
+	dump_trailer(dev, drv_context->rx, drv_context->len, 16);
+#endif
+
+	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+	msg = drv_context->cur_msg;
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+/* Workqueue bottom half: completes the transfer in process context,
+ * outside the DMA callback that queued it. */
+static void int_transfer_complete_work(struct work_struct *work)
+{
+	struct ssp_driver_context *drv_context = container_of(work,
+				struct ssp_driver_context, complete_work);
+
+	int_transfer_complete(drv_context);
+}
+
+/* Complete the current message after a polled transfer, accounting */
+/* for the number of bytes actually read back.                      */
+static void poll_transfer_complete(struct ssp_driver_context *drv_context)
+{
+	struct spi_message *msg = drv_context->cur_msg;
+
+	msg->actual_length += drv_context->len -
+		(drv_context->rx_end - drv_context->rx);
+	msg->status = 0;
+
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq:	IRQ number (unused, the context comes via @dev_id)
+ * @dev_id:	Pointer to the private driver context
+ *
+ * The SSP interrupt is not used for transfers, which are handled by
+ * DMA or polling: only under/over run conditions are caught here to
+ * detect broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct ssp_driver_context *drv_context = dev_id;
+	void *reg = drv_context->ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	u32 status = read_SSSR(reg);
+
+	/* The IRQ line is shared (IRQF_SHARED): if none of our       */
+	/* sources is pending the interrupt belongs to another device. */
+	if (likely(!(status & drv_context->mask_sr)))
+		return IRQ_NONE;
+
+	if (status & (SSSR_ROR | SSSR_TUR)) {
+		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
+		WARN_ON(1);
+		if (status & SSSR_ROR)
+			dev_err(dev, "we have Overrun\n");
+		if (status & SSSR_TUR)
+			dev_err(dev, "we have Underrun\n");
+	}
+
+	/* We can fall here when not using DMA mode */
+	if (!drv_context->cur_msg) {
+		disable_interface(drv_context);
+		disable_triggers(drv_context);
+	}
+	/* clear status register */
+	write_SSSR(drv_context->clear_sr, reg);
+	return IRQ_HANDLED;
+}
+
+/* Tasklet body for polled (non-DMA) transfers. */
+static void poll_transfer(unsigned long data)
+{
+	struct ssp_driver_context *drv_context =
+		(struct ssp_driver_context *)data;
+
+	/* Push TX out word by word, reading back as we go so the */
+	/* RX FIFO is drained and never overruns.                 */
+	if (drv_context->tx)
+		while (drv_context->tx != drv_context->tx_end) {
+			drv_context->write(drv_context);
+			drv_context->read(drv_context);
+		}
+
+	/* Busy-wait for the remaining RX words. NOTE(review): relies */
+	/* on the reader returning non-zero once rx reaches rx_end —  */
+	/* confirm against the u8/u16/u32_reader implementations.     */
+	while (!drv_context->read(drv_context))
+		cpu_relax();
+
+	poll_transfer_complete(drv_context);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @drv_context:	Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_driver_context *drv_context)
+{
+	u32 sssr;
+	u32 count = 0;
+	u32 cr0;
+	void *i2c_reg = drv_context->I2C_ioaddr;
+	struct device *dev = &drv_context->pdev->dev;
+	void *reg = drv_context->ioaddr;
+	struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
+	cr0 = chip->cr0;
+
+	dev_warn(dev, "In %s : Starting bit banging\n", __func__);
+	if (read_SSSR(reg) & SSP_NOT_SYNC)
+		dev_warn(dev, "SSP clock desynchronized.\n");
+	if (!(read_SSCR0(reg) & SSCR0_SSE))
+		dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+	dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+	/* Disable the port while reprogramming, then restart it with */
+	/* a PSP format suitable for the synchronization sequence.    */
+	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+	write_SSPSP(0x02010007, reg);
+
+	write_SSTO(chip->timeout, reg);
+	write_SSCR0(cr0, reg);
+
+	/*
+	*  This routine uses the DFx block to override the SSP inputs
+	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+	*  we have to generate the clock to clear busy.
+	*/
+	write_I2CDATA(0x3, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070034, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CDATA(0x00000099, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	sssr = read_SSSR(reg);
+
+	/* Bit bang the clock until CSS clears */
+	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+		write_I2CDATA(0x2, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CDATA(0x3, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		sssr = read_SSSR(reg);
+		count++;
+	}
+	/* The previous message used a backslash line-splice inside the */
+	/* string literal, embedding raw tabs in the logged text.       */
+	if (count >= MAX_BITBANGING_LOOP)
+		dev_err(dev,
+			"ERROR in %s : infinite loop on bit banging. Aborting\n",
+			__func__);
+
+	dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+	/* Release the DFx override of the SSP pins. */
+	write_I2CDATA(0x0, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+/**
+ * ssp_get_clk_div() - Compute the SSCR0 serial clock divisor field
+ * @speed:	Requested bit rate in Hz
+ *
+ * The divisor field holds (div - 1) and is clamped to div >= 4
+ * (100 MHz / 4 = 25 MHz maximum bit rate).  Guard against a
+ * zero/negative @speed (e.g. an unset spi->max_speed_hz) instead
+ * of dividing by zero.
+ */
+static unsigned int ssp_get_clk_div(int speed)
+{
+	if (unlikely(speed <= 0))
+		speed = 1;
+	return max(100000000 / speed, 4) - 1;
+}
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:	Pointer to the spi_device struct
+ * @msg:	Pointer to the spi_message struct
+ *
+ * Starts the first (and only) spi_transfer of @msg, in DMA or polled
+ * mode depending on the chip configuration.  Always returns 0;
+ * failures are reported through msg->status and msg->complete().
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct ssp_driver_context *drv_context = \
+	spi_master_get_devdata(spi->master);
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = drv_context->ioaddr;
+	u32 cr1;
+	struct device *dev = &drv_context->pdev->dev;
+	chip = spi_get_ctldata(msg->spi);
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	drv_context->cur_msg = msg;
+
+	/* We handle only one transfer message since the protocol module has to
+	   control the out of band signaling. */
+	transfer = list_entry(msg->transfers.next,
+					struct spi_transfer,
+					transfer_list);
+
+	/* Check transfer length */
+	if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+		(transfer->len == 0))) {
+		dev_warn(dev, "transfer length null or greater than %d\n",
+			MAX_SPI_TRANSFER_SIZE);
+		dev_warn(dev, "length = %d\n", transfer->len);
+		msg->status = -EINVAL;
+
+		if (msg->complete)
+			msg->complete(msg->context);
+
+		return 0;
+	}
+
+	/* Flush any remaining data (in case of failed previous transfer) */
+	flush(drv_context);
+
+	drv_context->tx  = (void *)transfer->tx_buf;
+	drv_context->rx  = (void *)transfer->rx_buf;
+	drv_context->len = transfer->len;
+	drv_context->write = chip->write;
+	drv_context->read = chip->read;
+
+	if (likely(chip->dma_enabled)) {
+		drv_context->dma_mapped = map_dma_buffers(drv_context);
+		/* NOTE(review): on mapping failure the message is never  */
+		/* completed here; a caller waiting on msg->complete will */
+		/* hang. Confirm map_dma_buffers() reports the error.     */
+		if (unlikely(!drv_context->dma_mapped))
+			return 0;
+	} else {
+		/* Polled mode: substitute dummy handlers for the */
+		/* missing direction of a half-duplex transfer.   */
+		drv_context->write = drv_context->tx ?
+			chip->write : null_writer;
+		drv_context->read  = drv_context->rx ?
+			chip->read : null_reader;
+	}
+	drv_context->tx_end = drv_context->tx + transfer->len;
+	drv_context->rx_end = drv_context->rx + transfer->len;
+
+	/* Clear status  */
+	write_SSSR(drv_context->clear_sr, reg);
+
+	/* setup the CR1 control register */
+	cr1 = chip->cr1 | drv_context->cr1_sig;
+
+	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/* in case of len smaller than burst size, adjust the RX     */
+		/* threshold. All other cases will use the default threshold */
+		/* value. The RX fifo threshold must be aligned with the DMA */
+		/* RX transfer size, which may be limited to a multiple of 4 */
+		/* bytes due to 32bits DDR access.                           */
+		if  (drv_context->len / drv_context->n_bytes <=
+			drv_context->rx_fifo_threshold) {
+			u32 rx_fifo_threshold;
+
+			rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
+				drv_context->n_bytes;
+			cr1 &= ~(SSCR1_RFT);
+			cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
+					& SSCR1_RFT;
+		} else {
+			write_SSTO(chip->timeout, reg);
+		}
+	}
+
+	dev_dbg(dev,
+		"transfer len:%d  n_bytes:%d  cr0:%x  cr1:%x",
+		drv_context->len, drv_context->n_bytes, chip->cr0, cr1);
+
+	/* first set CR1 */
+	write_SSCR1(cr1, reg);
+
+	/* Do bitbanging only if SSP not-enabled or not-synchronized */
+	if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+		(!(read_SSCR0(reg) & SSCR0_SSE))) &&
+		(drv_context->quirks & QUIRKS_BIT_BANGING))) {
+			start_bitbanging(drv_context);
+	} else {
+		/* (re)start the SSP */
+		write_SSCR0(chip->cr0, reg);
+	}
+
+	if (likely(chip->dma_enabled)) {
+		/* Tighten the allowed C-state exit latency for the DMA. */
+		if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
+			pm_qos_update_request(&drv_context->pm_qos_req,
+				MIN_EXIT_LATENCY);
+		dma_transfer(drv_context);
+	} else {
+		tasklet_schedule(&drv_context->poll_transfer);
+	}
+
+	return 0;
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:	Pointer to the spi_device struct
+ *
+ * Validates the word size, allocates (or reuses) the per-device
+ * chip_data and computes the CR0/CR1 templates used by transfer().
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip;
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	u32 tx_fifo_threshold;
+	u32 burst_size;
+	u32 clk_div;
+
+	if (!spi->bits_per_word)
+		spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+	if ((spi->bits_per_word < MIN_BITS_PER_WORD
+		|| spi->bits_per_word > MAX_BITS_PER_WORD))
+		return -EINVAL;
+
+	/* Reuse the chip data installed by a previous setup() call. */
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		if (!chip) {
+			dev_err(&spi->dev,
+			"failed setup: can't allocate chip data\n");
+			return -ENOMEM;
+		}
+	}
+	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+		spi->bits_per_word - 16 : spi->bits_per_word)
+			| SSCR0_SSE
+			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+	/* protocol drivers may change the chip settings, so...  */
+	/* if chip_info exists, use it                           */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	chip->cr1 = 0;
+	if (chip_info) {
+		burst_size = chip_info->burst_size;
+		if (burst_size > IMSS_FIFO_BURST_8)
+			burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->timeout = chip_info->timeout;
+
+		if (chip_info->enable_loopback)
+			chip->cr1 |= SSCR1_LBM;
+
+		chip->dma_enabled = chip_info->dma_enabled;
+
+	} else {
+		/* if no chip_info provided by protocol driver, */
+		/* set default values                           */
+		dev_info(&spi->dev, "setting default chip values\n");
+
+		burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->dma_enabled = 1;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			chip->timeout = 0;
+		else
+			chip->timeout = DFLT_TIMEOUT_VAL;
+	}
+	/* Set FIFO thresholds according to burst_size */
+	if (burst_size == IMSS_FIFO_BURST_8)
+		drv_context->rx_fifo_threshold = 8;
+	else if (burst_size == IMSS_FIFO_BURST_4)
+		drv_context->rx_fifo_threshold = 4;
+	else
+		drv_context->rx_fifo_threshold = 1;
+	tx_fifo_threshold = SPI_FIFO_SIZE - drv_context->rx_fifo_threshold;
+	chip->cr1 |= (SSCR1_RxTresh(drv_context->rx_fifo_threshold) &
+		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) &
+		SSCR1_TFT);
+
+	drv_context->dma_mapped = 0;
+
+	/* setting phase and polarity. spi->mode comes from boardinfo */
+	if ((spi->mode & SPI_CPHA) != 0)
+		chip->cr1 |= SSCR1_SPH;
+	if ((spi->mode & SPI_CPOL) != 0)
+		chip->cr1 |= SSCR1_SPO;
+
+	if (drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+		/* set slave mode */
+		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
+
+	dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
+		spi->bits_per_word,
+		spi->mode & 0x3);
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+		chip->read = u8_reader;
+		chip->write = u8_writer;
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+		chip->read = u16_reader;
+		chip->write = u16_writer;
+	} else if (spi->bits_per_word <= 32) {
+		/* EDSS was already set above for words > 16 bits. */
+		chip->cr0 |= SSCR0_EDSS;
+		chip->n_bytes = 4;
+		chip->read = u32_reader;
+		chip->write = u32_writer;
+	} else {
+		/* Defensive: unreachable while MAX_BITS_PER_WORD is 32 */
+		/* (checked at the top of this function).               */
+		dev_err(&spi->dev, "invalid wordsize\n");
+		/* Don't leak a freshly allocated chip_data; one from a */
+		/* previous setup() stays owned by the SPI core and is  */
+		/* released in cleanup().                               */
+		if (!spi_get_ctldata(spi))
+			kfree(chip);
+		return -EINVAL;
+	}
+
+	if ((drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+		chip->speed_hz = spi->max_speed_hz;
+		clk_div = ssp_get_clk_div(chip->speed_hz);
+		chip->cr0 |= clk_div << 8;
+	}
+	chip->bits_per_word = spi->bits_per_word;
+
+	spi_set_ctldata(spi, chip);
+
+	/* setup of drv_context members that will not change across transfers */
+	drv_context->n_bytes = chip->n_bytes;
+
+	if (chip->dma_enabled) {
+		intel_mid_ssp_spi_dma_init(drv_context);
+		drv_context->cr1_sig  = SSCR1_TSRE | SSCR1_RSRE;
+		drv_context->mask_sr  = SSSR_ROR | SSSR_TUR;
+		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			drv_context->cr1_sig  |= SSCR1_TRAIL;
+	} else {
+		drv_context->cr1_sig = SSCR1_RIE | SSCR1_TIE | SSCR1_TINTE;
+		drv_context->mask_sr = SSSR_RFS | SSSR_TFS |
+				 SSSR_ROR | SSSR_TUR | SSSR_TINT;
+	}
+	drv_context->clear_sr = SSSR_TUR  | SSSR_ROR | SSSR_TINT;
+
+	return 0;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi:	Pointer to the spi_device struct
+ *
+ * Releases the per-device chip data and the controller-side
+ * resources (DMA channels, PM QoS request) acquired in setup().
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct ssp_driver_context *drv_context =
+		spi_master_get_devdata(spi->master);
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	if (drv_context->dma_initialized)
+		intel_mid_ssp_spi_dma_exit(drv_context);
+
+	/* Drop the PM QoS request added in probe */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_remove_request(&drv_context->pm_qos_req);
+
+	spi_set_ctldata(spi, NULL);
+	kfree(chip);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @ent:	Pointer to the pci_device_id struct
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct ssp_driver_context *drv_context = NULL;
+	int status;
+	u32 iolen = 0;
+	u8 ssp_cfg;
+	int pos;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+
+	/* Check if the SSP we are probed for has been allocated */
+	/* to operate as SPI. This information is retrieved from */
+	/* the field adid of the Vendor-Specific PCI capability  */
+	/* which is used as a configuration register.            */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability\n");
+		goto err_abort_probe;
+	}
+	if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)\n",
+			ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(dev, "found PCI SSP controller"
+		" (ID: %04xh:%04xh cfg: %02xh)\n",
+		pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate Slave with space for drv_context and null dma buffer */
+	master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_slave\n");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	drv_context = spi_master_get_devdata(master);
+	drv_context->master = master;
+
+	drv_context->pdev = pdev;
+	drv_context->quirks = ent->driver_data;
+
+	/* Set platform & configuration quirks */
+	if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
+		/* Apply bit banging workaround on MRST */
+		drv_context->quirks |= QUIRKS_BIT_BANGING;
+		/* MRST slave mode workarounds */
+		if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+			drv_context->quirks |=
+				QUIRKS_USE_PM_QOS |
+				QUIRKS_SRAM_ADDITIONAL_CPY;
+	}
+	drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+	if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
+		drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
+	master->num_chipselect = 1;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+
+	/* Workqueue for the deferred DMA completion handling. */
+	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
+	if (!drv_context->dma_wq) {
+		dev_err(dev, "cannot create workqueue\n");
+		status = -ENOMEM;
+		goto err_free_1;
+	}
+	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
+
+	drv_context->dma_initialized = 0;
+
+	/* get basic io resource and map it */
+	drv_context->paddr = pci_resource_start(pdev, 0);
+	iolen = pci_resource_len(pdev, 0);
+
+	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+	if (status)
+		goto err_free_wq;
+
+	drv_context->ioaddr =
+		ioremap_nocache(drv_context->paddr, iolen);
+	if (!drv_context->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", drv_context->paddr);
+	dev_dbg(dev, "ioaddr = : %p\n", drv_context->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+	dev_dbg(dev, "quirks = : %08lx\n", drv_context->quirks);
+
+	if (drv_context->quirks & QUIRKS_BIT_BANGING) {
+		/* Bit banging on the clock is done through */
+		/* DFT which is available through I2C.      */
+		/* get base address of I2C_Serbus registers */
+		drv_context->I2C_paddr = 0xff12b000;
+		drv_context->I2C_ioaddr =
+			ioremap_nocache(drv_context->I2C_paddr, 0x10);
+		if (!drv_context->I2C_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_3;
+		}
+	}
+
+	/* Attach to IRQ */
+	drv_context->irq = pdev->irq;
+	status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", drv_context);
+	if (status < 0) {
+		dev_err(&pdev->dev, "can not get IRQ\n");
+		goto err_free_4;
+	}
+
+	if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
+		/* get base address of DMA selector. */
+		syscfg = drv_context->paddr - SYSCFG;
+		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+		if (!syscfg_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_5;
+		}
+		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+		/* One-shot configuration write; don't leak the mapping. */
+		iounmap(syscfg_ioaddr);
+	}
+
+	tasklet_init(&drv_context->poll_transfer, poll_transfer,
+		(unsigned long)drv_context);
+
+	/* Register with the SPI framework */
+	dev_info(dev, "register with SPI framework (bus spi%d)\n",
+		master->bus_num);
+
+	status = spi_register_master(master);
+
+	if (status != 0) {
+		dev_err(dev, "problem registering spi\n");
+		goto err_free_5;
+	}
+
+	pci_set_drvdata(pdev, drv_context);
+
+	/* Create the PM_QOS request */
+	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_add_request(&drv_context->pm_qos_req,
+		PM_QOS_CPU_DMA_LATENCY,
+		PM_QOS_DEFAULT_VALUE);
+
+	return status;
+
+err_free_5:
+	free_irq(drv_context->irq, drv_context);
+err_free_4:
+	/* The I2C window is only mapped for the bit banging quirk. */
+	if (drv_context->quirks & QUIRKS_BIT_BANGING)
+		iounmap(drv_context->I2C_ioaddr);
+err_free_3:
+	iounmap(drv_context->ioaddr);
+err_free_2:
+	pci_release_region(pdev, 0);
+err_free_wq:
+	destroy_workqueue(drv_context->dma_wq);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev:	Pointer to the pci_dev struct
+ *
+ * Releases everything acquired in probe.  The completion workqueue
+ * and the polling tasklet are torn down here as well (they were
+ * previously leaked on unload).
+ */
+static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+
+	if (!drv_context)
+		return;
+
+	/* Release IRQ */
+	free_irq(drv_context->irq, drv_context);
+
+	/* Quiesce the deferred-work machinery before unmapping. */
+	tasklet_kill(&drv_context->poll_transfer);
+	destroy_workqueue(drv_context->dma_wq);
+
+	iounmap(drv_context->ioaddr);
+	if (drv_context->quirks & QUIRKS_BIT_BANGING)
+		iounmap(drv_context->I2C_ioaddr);
+
+	/* disconnect from the SPI framework; this drops the last */
+	/* reference on master, freeing drv_context with it.      */
+	spi_unregister_master(drv_context->master);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+/**
+ * intel_mid_ssp_spi_suspend() - Driver suspend procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @state:	pm_message_t
+ *
+ * Only the polling tasklet is stopped here.
+ * NOTE(review): in-flight DMA transfers are not quiesced and no
+ * controller state is saved — confirm this is sufficient.
+ */
+static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "suspend\n");
+
+	tasklet_disable(&drv_context->poll_transfer);
+
+	return 0;
+}
+
+/**
+ * intel_mid_ssp_spi_resume() - Driver resume procedure
+ * @pdev:	Pointer to the pci_dev struct
+ *
+ * Mirror of suspend: re-enable the polling tasklet.  No controller
+ * registers are restored here.
+ */
+static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
+{
+	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
+	dev_dbg(&pdev->dev, "resume\n");
+
+	tasklet_enable(&drv_context->poll_transfer);
+
+	return 0;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#endif /* CONFIG_PM */
+
+
+/* const data must live in a const init section (__devinitconst, */
+/* not __devinitdata), otherwise the build warns about a section */
+/* type conflict.                                                */
+static const struct pci_device_id pci_ids[] __devinitconst = {
+	/* MRST SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
+	/* MDFL SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP1 */
+	{ PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP3 */
+	{ PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
+	{},
+};
+/* Allow userspace to autoload the module from the PCI IDs above. */
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* PCI glue. suspend/resume resolve to NULL when CONFIG_PM is unset. */
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	__devexit_p(intel_mid_ssp_spi_remove),
+	.suspend =	intel_mid_ssp_spi_suspend,
+	.resume =	intel_mid_ssp_spi_resume,
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+/* NOTE(review): late_initcall presumably defers the probe until the */
+/* DMA engine driver is available — confirm the ordering dependency. */
+late_initcall(intel_mid_ssp_spi_init);
+
+/* Module unload: unhook the driver from the PCI core. */
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
+
diff --git a/drivers/spi/intel_mid_ssp_spi.h b/drivers/spi/intel_mid_ssp_spi.h
new file mode 100644
index 0000000..aef2fa8
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.h
@@ -0,0 +1,321 @@
+/*
+ *  Copyright (C) Intel 2009
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#define PCI_MRST_DMAC1_ID	0x0814
+#define PCI_MDFL_DMAC1_ID	0x0827
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP   10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignment configuration from PCI config */
+#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
+#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
+#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* Driver's quirk flags */
+/* This workaround buffers data in the audio fabric SDRAM from      */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode.                                                  */
+#define QUIRKS_SRAM_ADDITIONAL_CPY	1
+/* If set the trailing bytes won't be handled by the DMA.           */
+/* Trailing byte feature not fully available.                       */
+#define QUIRKS_DMA_USE_NO_TRAIL		2
+/* If set, the driver will use PM_QOS to reduce the latency         */
+/* introduced by the deeper C-states which may produce over/under   */
+/* run issues. Must be used in slave mode. In master mode, the      */
+/* latency is not critical, but setting this workaround may         */
+/* improve the SPI throughput.                                      */
+#define QUIRKS_USE_PM_QOS		4
+/* This quirk is set on Moorestown                                  */
+#define QUIRKS_PLATFORM_MRST		8
+/* This quirk is set on Medfield                                    */
+#define QUIRKS_PLATFORM_MDFL		16
+/* If set, the driver will apply the bitbanging workaround needed   */
+/* to enable defective Langwell stepping A SSP. The defective SSP   */
+/* can be enabled only once, and should never be disabled.          */
+#define QUIRKS_BIT_BANGING		32
+/* If set, SPI is in slave clock mode                               */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
+/* Generate raw 32-bit accessors read_<reg>()/write_<reg>() for a */
+/* register at byte offset `off` from a mapped base pointer.      */
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD     4
+#define MAX_BITS_PER_WORD     32
+#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+
+/* DFx I2C window used by the bit banging workaround.  The macro    */
+/* already expands to complete definitions, so no trailing ';' (a   */
+/* stray file-scope semicolon is an ISO C violation).               */
+DEFINE_SSP_REG(I2CCTRL, 0x00)
+DEFINE_SSP_REG(I2CDATA, 0x04)
+
+/* Langwell GPIO bank 1 registers. */
+DEFINE_SSP_REG(GPLR1, 0x04)
+DEFINE_SSP_REG(GPDR1, 0x0c)
+DEFINE_SSP_REG(GPSR1, 0x14)
+DEFINE_SSP_REG(GPCR1, 0x1C)
+DEFINE_SSP_REG(GAFR1_U, 0x44)
+
+#define SYSCFG  0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR   SRAM_BASE_ADDR
+#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
+#define SSCR0_NCS   (1 << 21)           /* Network clock select */
+#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrrun int mask */
+#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
+#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
+#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
+#define SSSR_BSY		(1 << 4)	/* SSP Busy */
+#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
+#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
+#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
+#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
+
+#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
+#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
+
+#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
+#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interupt Enable */
+#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE         (1 << 23) /* Bit Count Error */
+#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC         (1 << 20) /* End Of Chain */
+#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x)((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+/* Cookie for the per-direction DMA callbacks: presumably identifies */
+/* the owning driver context and the transfer direction — verify     */
+/* against the DMA completion handlers.                              */
+struct callback_param {
+	void *drv_context;
+	u32 direction;	/* RX_DIRECTION or TX_DIRECTION */
+};
+
+/* Per-controller state, allocated as the spi_master device data. */
+struct ssp_driver_context {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;	/* physical base of PCI BAR 0 */
+	void *ioaddr;		/* ioremapped SSP registers */
+	int irq;
+
+	/* I2C registers (DFx window, mapped only for the bit */
+	/* banging quirk)                                     */
+	dma_addr_t I2C_paddr;
+	void *I2C_ioaddr;
+
+	/* SSP masks*/
+	u32 cr1_sig;	/* SSCR1 bits common to every transfer */
+	u32 cr1;
+	u32 clear_sr;	/* SSSR bits written back to clear status */
+	u32 mask_sr;	/* SSSR bits ssp_int() reacts to */
+
+	/* PM_QOS request */
+	struct pm_qos_request_list pm_qos_req;
+
+	struct tasklet_struct poll_transfer;
+
+	spinlock_t lock;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	size_t len;		/* total transfer length in bytes */
+	size_t len_dma_rx;	/* part of RX covered by the DMA */
+	size_t len_dma_tx;	/* part of TX covered by the DMA */
+	void *tx;		/* TX cursor, advances to tx_end */
+	void *tx_end;
+	void *rx;		/* RX cursor, advances to rx_end */
+	void *rx_end;
+	bool dma_initialized;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	u8 n_bytes;		/* bytes per word (1, 2 or 4) */
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan    *txchan;
+	struct dma_chan    *rxchan;
+	struct workqueue_struct *dma_wq;
+	struct work_struct complete_work;
+
+	/* Langwell SRAM bounce buffers (QUIRKS_SRAM_ADDITIONAL_CPY) */
+	u8 __iomem *virt_addr_sram_tx;
+	u8 __iomem *virt_addr_sram_rx;
+
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+	struct pci_dev *dmac1;
+
+	unsigned long quirks;	/* QUIRKS_* flags */
+	u32 rx_fifo_threshold;	/* in words, derived from burst_size */
+};
+
+/* Per-spi_device configuration computed in setup() and consumed */
+/* by transfer().                                                */
+struct chip_data {
+	u32 cr0;		/* SSCR0 template (data size, clk divisor) */
+	u32 cr1;		/* SSCR1 template (thresholds, mode bits) */
+	u32 timeout;		/* SSTO receiver time-out value */
+	u8 n_bytes;		/* bytes per word (1, 2 or 4) */
+	u8 dma_enabled;
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct ssp_driver_context *drv_context);
+	int (*read)(struct ssp_driver_context *drv_context);
+};
+
+
+/* FIFO burst sizes selectable through intel_mid_ssp_spi_chip; */
+/* mapped to the RX FIFO threshold (1, 4 or 8 words) in setup(). */
+enum intel_mid_ssp_spi_fifo_burst {
+	IMSS_FIFO_BURST_1,
+	IMSS_FIFO_BURST_4,
+	IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied by the SPI core to spi_device.controller_data and read
+ * back in setup() ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	enum intel_mid_ssp_spi_fifo_burst burst_size;
+	u32 timeout;		/* SSTO receiver time-out value */
+	u8 enable_loopback;	/* sets SSCR1_LBM, for testing */
+	u8 dma_enabled;
+};
+
+
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+/* Header of a SPI Device Information Block.                       */
+/* NOTE(review): not referenced anywhere in this file chunk —      */
+/* verify it has users before keeping it.                          */
+struct spi_dib_header {
+	u32       signature;
+	u32       length;
+	u8         rev;
+	u8         checksum;
+	u8         dib[0];	/* variable-length payload */
+} __attribute__((packed));
+
+#endif /*INTEL_MID_SSP_SPI_H_*/

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-02 22:40   ` Alan Cox
@ 2011-02-03 13:28     ` Mark Brown
  2011-02-03 15:04       ` Alan Cox
  2011-02-14 19:09     ` Grant Likely
  1 sibling, 1 reply; 13+ messages in thread
From: Mark Brown @ 2011-02-03 13:28 UTC (permalink / raw)
  To: Alan Cox
  Cc: Russ Gorby, David Brownell, Grant Likely,
	open list:SPI SUBSYSTEM, open list

On Wed, Feb 02, 2011 at 10:40:54PM +0000, Alan Cox wrote:

> And this is the unified one that handles all the devices, but I gather
> may need some fixing/test work on Medfield.

I've got the same question here as I had with Russ' patch: it looks like
there's some overlap with the SSP ports used for audio (it's just a
generic programmable serial port so even if it's not normally used for
audio that's a possibility), how is that handled?

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-03 13:28     ` Mark Brown
@ 2011-02-03 15:04       ` Alan Cox
  2011-02-03 15:06         ` Mark Brown
  0 siblings, 1 reply; 13+ messages in thread
From: Alan Cox @ 2011-02-03 15:04 UTC (permalink / raw)
  To: Mark Brown
  Cc: Russ Gorby, David Brownell, Grant Likely,
	open list:SPI SUBSYSTEM, open list

On Thu, 3 Feb 2011 13:28:00 +0000
Mark Brown <broonie@opensource.wolfsonmicro.com> wrote:

> On Wed, Feb 02, 2011 at 10:40:54PM +0000, Alan Cox wrote:
> 
> > And this is the unified one that handles all the devices, but I gather
> > may need some fixing/test work on Medfield.
> 
> I've got the same question here as I had with Russ' patch: it looks like
> there's some overlap with the SSP ports used for audio (it's just a
> generic programmable serial port so even if it's not normally used for
> audio that's a possiblity), how is that handled?

The SSP has PCI configuration indicating how it is being assigned, which
is in vendor capability byte 6. The low 3 bits indicate the mode, where
mode 1 is an SPI master/slave, and in that case bit 6 is set for a slave.

The SSP/SPI driver will only grab ports that have been assigned to that
purpose as part of the system design.

I'm just putting the other bits from the generic driver back into the
more featured/tested specific driver that Russ posted.

Alan

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-03 15:04       ` Alan Cox
@ 2011-02-03 15:06         ` Mark Brown
  0 siblings, 0 replies; 13+ messages in thread
From: Mark Brown @ 2011-02-03 15:06 UTC (permalink / raw)
  To: Alan Cox
  Cc: Russ Gorby, David Brownell, Grant Likely,
	open list:SPI SUBSYSTEM, open list

On Thu, Feb 03, 2011 at 03:04:32PM +0000, Alan Cox wrote:
> Mark Brown <broonie@opensource.wolfsonmicro.com> wrote:

> > I've got the same question here as I had with Russ' patch: it looks like
> > there's some overlap with the SSP ports used for audio (it's just a
> > generic programmable serial port so even if it's not normally used for
> > audio that's a possiblity), how is that handled?

> The SSP has PCI configuration indicating how it is being assigned, which
> is in vendor capability byte 6. The low 3 bits indicte the mode, where
> mode 1 is an SPI master/slave, and in that case bit 6 is set for a slave.

OK, cool - just checking as it's a common issue for these generic serial
ports.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011
  2011-02-02 21:01 ` [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011 Russ Gorby
@ 2011-02-12  9:19   ` Grant Likely
  2011-02-14 17:22     ` Gorby, Russ
  0 siblings, 1 reply; 13+ messages in thread
From: Grant Likely @ 2011-02-12  9:19 UTC (permalink / raw)
  To: Russ Gorby; +Cc: David Brownell, open list:SPI SUBSYSTEM, open list

On Wed, Feb 02, 2011 at 01:01:51PM -0800, Russ Gorby wrote:
> Hello SPI maintainers,
> I am sending a patch for the (new) intel_mid_ssp_spi driver for
> consideration for inclusion in the Linux Kernel. This is a SPI master
> controller driver that is being used for the intel MID platform (Medfield).
> It uses the on-board Bulverde SSP controller configured for SPI (spibus #3)
> running at 25Mhz.

Hi Russ,

Just for future reference, when you're only sending one patch, please
put your notes into the patch description itself, or immediately after
the --- line.  No need for a cover letter like this one.

Thanks,
g.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* RE: [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011
  2011-02-12  9:19   ` Grant Likely
@ 2011-02-14 17:22     ` Gorby, Russ
  0 siblings, 0 replies; 13+ messages in thread
From: Gorby, Russ @ 2011-02-14 17:22 UTC (permalink / raw)
  To: Grant Likely; +Cc: David Brownell, open list:SPI SUBSYSTEM, open list

OK got it. Thanks.

>-----Original Message-----
>From: Grant Likely [mailto:glikely@secretlab.ca] On Behalf Of Grant
>Likely
>Sent: Saturday, February 12, 2011 1:19 AM
>To: Gorby, Russ
>Cc: David Brownell; open list:SPI SUBSYSTEM; open list
>Subject: Re: [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011
>
>On Wed, Feb 02, 2011 at 01:01:51PM -0800, Russ Gorby wrote:
>> Hello SPI maintainers,
>> I am sending a patch for the (new) intel_mid_ssp_spi driver for
>> consideration for inclusion in the Linux Kernel. This is a SPI master
>> controller driver that is being used for the intel MID platform
>(Medfield).
>> It uses the on-board Bulverde SSP controller configured for SPI
>(spibus #3)
>> running at 25Mhz.
>
>Hi Russ,
>
>Just for future reference, when you're only sending one patch, please
>put your notes into the patch description itself, or immediately after
>the --- line.  No need for a cover letter like this one.
>
>Thanks,
>g.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform
  2011-02-02 22:40   ` Alan Cox
  2011-02-03 13:28     ` Mark Brown
@ 2011-02-14 19:09     ` Grant Likely
  1 sibling, 0 replies; 13+ messages in thread
From: Grant Likely @ 2011-02-14 19:09 UTC (permalink / raw)
  To: Alan Cox; +Cc: Russ Gorby, David Brownell, open list:SPI SUBSYSTEM, open list

On Wed, Feb 02, 2011 at 10:40:54PM +0000, Alan Cox wrote:
> On Wed,  2 Feb 2011 13:01:52 -0800
> Russ Gorby <russ.gorby@intel.com> wrote:
> 
> > SPI master controller driver for the Intel MID platform Medfield
> > This driver uses the Penwell SSP controller and configures it to
> > be a SPI device (spibus 3). This bus supports a single device -
> > the 3G SPI modem that can operate up to 25Mhz.
> 
> And this is the unified one that handles all the devices, but I gather
> may need some fixing/test work on Medfield.
> 
> (and the only reason you haven't seen this submitted yet is I was
>  asked to wait until it had been tested on those platforms. So I'm
>  very surprised to see the other submission)
> 
> Alan
> 
> --
> 
> From: Mathieu SOULARD <mathieux.soulard@intel.com>
> 
> intel_mid_ssp_spi:  Moorestown and Medfield SPI for SSP devices
>     
> This driver is a fusion of various internal drivers into a single
> driver for the SPI slave/master on the Intel Moorestown and Medfield
> SSP devices.
>     
> Signed-off-by: Mathieu SOULARD <mathieux.soulard@intel.com>
> [Ported to the -next tree DMA engine]
> Signed-off-by: Alan Cox <alan@linux.intel.com>
> 
> diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
> index bb233a9..6d1a41f 100644
> --- a/drivers/spi/Kconfig
> +++ b/drivers/spi/Kconfig
> @@ -178,6 +178,14 @@ config SPI_IMX
>  	  This enables using the Freescale i.MX SPI controllers in master
>  	  mode.
>  
> +config SPI_INTEL_MID_SSP
> +	tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
> +	depends on SPI_MASTER && INTEL_MID_DMAC && EXPERIMENTAL
> +	help
> +	  This is the unified SSP SPI slave controller driver for the Intel
> +	  MID platforms, handling Moorestown & Medfield, master & slave
> +	  clock mode.
> +
>  config SPI_LM70_LLP
>  	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
>  	depends on PARPORT && EXPERIMENTAL
> diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
> index 86d1b5f..6e052b5 100644
> --- a/drivers/spi/Makefile
> +++ b/drivers/spi/Makefile
> @@ -24,6 +24,7 @@ obj-$(CONFIG_SPI_DW_MMIO)		+= dw_spi_mmio.o
>  obj-$(CONFIG_SPI_EP93XX)		+= ep93xx_spi.o
>  obj-$(CONFIG_SPI_GPIO)			+= spi_gpio.o
>  obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
> +obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= intel_mid_ssp_spi.o

Please use "spi_" as a prefix instead of a "_spi" suffix on spi
drivers.  I'm asking for all new spi drivers to follow this
convention.

>  obj-$(CONFIG_SPI_LM70_LLP)		+= spi_lm70llp.o
>  obj-$(CONFIG_SPI_PXA2XX)		+= pxa2xx_spi.o
>  obj-$(CONFIG_SPI_PXA2XX_PCI)		+= pxa2xx_spi_pci.o
> diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
> new file mode 100644
> index 0000000..2d6d881
> --- /dev/null
> +++ b/drivers/spi/intel_mid_ssp_spi.c
> @@ -0,0 +1,1403 @@
> +/*
> + * intel_mid_ssp_spi.c

Nit: Personally, I prefer not to have the filename in the header
block.

> + * This driver supports Bulverde SSP core used on Intel MID platforms
> + * It supports SSP of Moorestown & Medfield platforms and handles clock
> + * slave & master modes.
> + *
> + * Copyright (c) 2010, Intel Corporation.
> + *  Ken Mills <ken.k.mills@intel.com>
> + *  Sylvain Centelles <sylvain.centelles@intel.com>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; if not, write to the Free Software Foundation, Inc.,
> + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
> + *
> + */
> +
> +/*
> + * Note:
> + *
> + * Supports DMA and non-interrupt polled transfers.
> + *
> + */
> +
> +#include <linux/delay.h>
> +#include <linux/interrupt.h>
> +#include <linux/highmem.h>
> +#include <linux/pci.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/intel_mid_dma.h>
> +#include <linux/pm_qos_params.h>
> +
> +#include <linux/spi/spi.h>
> +#include "intel_mid_ssp_spi.h"

This header is only included by this one file.  Everything in
intel_mid_ssp_spi.h should be moved into this file.

> +
> +#define DRIVER_NAME "intel_mid_ssp_spi_unified"

Used exactly once.  Drop the #define and move it inline.

> +
> +MODULE_AUTHOR("Ken Mills");
> +MODULE_DESCRIPTION("Bulverde SSP core SPI contoller");
> +MODULE_LICENSE("GPL");
> +
> +static const struct pci_device_id pci_ids[];
> +
> +#ifdef DUMP_RX
> +static void dump_trailer(const struct device *dev, char *buf, int len, int sz)

All symbols should use the driver's prefix, including
non-exported statics and debug functions.  Currently that is
"intel_mid_ssp_spi_" (which is a little long, it can probably
be abbreviated a bit).

> +{
> +	int tlen1 = (len < sz ? len : sz);
> +	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
> +	unsigned char *p;
> +	static char msg[MAX_SPI_TRANSFER_SIZE];

Wait, 8k on the stack!?!  No, wait, this is a static, which is also a
very bad idea because it means there can never be concurrent accesses
to this function.  Don't use a static buffer for this; instead send
the output to the console as you generate it.  Get rid of all the
sprintf stuff.

> +
> +	memset(msg, '\0', sizeof(msg));
> +	p = buf;
> +	while (p < buf + tlen1)
> +		sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +
> +	if (tlen2 > 0) {
> +		sprintf(msg, "%s .....", msg);
> +		p = (buf+len) - tlen2;
> +		while (p < buf + len)
> +			sprintf(msg, "%s%02x", msg, (unsigned int)*p++);
> +	}
> +
> +	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
> +		   len-tlen2, len - 1, msg);
> +}
> +#endif
> +
> +static inline u32 is_tx_fifo_empty(struct ssp_driver_context *drv_context)
> +{
> +	u32 sssr;
> +	sssr = read_SSSR(drv_context->ioaddr);
> +	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
> +		return 0;
> +	else
> +		return 1;

or more simply:

+	u32 sssr = read_SSSR(drv_context->ioaddr);
+	return ((sssr & SSSR_TFL_MASK) == 0 && (sssr & SSSR_TNF) != 0)

> +}
> +
> +static inline u32 is_rx_fifo_empty(struct ssp_driver_context *drv_context)
> +{
> +	return ((read_SSSR(drv_context->ioaddr) & SSSR_RNE) == 0);
> +}
> +
> +static inline void disable_interface(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
> +}
> +
> +static inline void disable_triggers(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	write_SSCR1(read_SSCR1(reg) & ~drv_context->cr1_sig, reg);
> +}
> +
> +
> +static void flush(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	u32 i = 0;
> +
> +	/* If the transmit fifo is not empty, reset the interface. */
> +	if (!is_tx_fifo_empty(drv_context)) {
> +		dev_err(&drv_context->pdev->dev,
> +				"TX FIFO not empty. Reset of SPI IF");
> +		disable_interface(drv_context);
> +		return;
> +	}
> +
> +	dev_dbg(&drv_context->pdev->dev, " SSSR=%x\r\n", read_SSSR(reg));
> +	while (!is_rx_fifo_empty(drv_context) && (i < SPI_FIFO_SIZE + 1)) {
> +		read_SSDR(reg);
> +		i++;
> +	}
> +	WARN(i > 0, "%d words flush occured\n", i);
> +
> +	return;

Drop the superfluous 'return'.

> +}
> +
> +static int null_writer(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	u8 n_bytes = drv_context->n_bytes;
> +
> +	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +		|| (drv_context->tx == drv_context->tx_end))
> +		return 0;

It's unusual in linux for a return value of '0' to be the failure
case.  Some comments describing what is being tested for would also
make it easier to understand.

> +
> +	write_SSDR(0, reg);
> +	drv_context->tx += n_bytes;
> +
> +	return 1;
> +}
> +
> +static int null_reader(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	u8 n_bytes = drv_context->n_bytes;
> +
> +	while ((read_SSSR(reg) & SSSR_RNE)
> +		&& (drv_context->rx < drv_context->rx_end)) {
> +		read_SSDR(reg);
> +		drv_context->rx += n_bytes;
> +	}
> +
> +	return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u8_writer(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +		|| (drv_context->tx == drv_context->tx_end))
> +		return 0;
> +
> +	write_SSDR(*(u8 *)(drv_context->tx), reg);
> +	++drv_context->tx;

The following is an accepted pattern for this:

	write_SSDR(*(u8 *)(drv_context->tx++), reg);

> +
> +	return 1;
> +}
> +
> +static int u8_reader(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	while ((read_SSSR(reg) & SSSR_RNE)
> +		&& (drv_context->rx < drv_context->rx_end)) {
> +		*(u8 *)(drv_context->rx) = read_SSDR(reg);
> +		++drv_context->rx;

ditto

> +	}
> +
> +	return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u16_writer(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +		|| (drv_context->tx == drv_context->tx_end))
> +		return 0;

This test is identical to the u8 version, and same in the u32 version
below.  A macro would probably be appropriate.

> +
> +	write_SSDR(*(u16 *)(drv_context->tx), reg);
> +	drv_context->tx += 2;
> +
> +	return 1;
> +}
> +
> +static int u16_reader(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	while ((read_SSSR(reg) & SSSR_RNE)
> +		&& (drv_context->rx < drv_context->rx_end)) {
> +		*(u16 *)(drv_context->rx) = read_SSDR(reg);
> +		drv_context->rx += 2;

Ditto

> +	}
> +
> +	return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static int u32_writer(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
> +		|| (drv_context->tx == drv_context->tx_end))
> +		return 0;
> +
> +	write_SSDR(*(u32 *)(drv_context->tx), reg);
> +	drv_context->tx += 4;
> +
> +	return 1;
> +}
> +
> +static int u32_reader(struct ssp_driver_context *drv_context)
> +{
> +	void *reg = drv_context->ioaddr;
> +	while ((read_SSSR(reg) & SSSR_RNE)
> +		&& (drv_context->rx < drv_context->rx_end)) {
> +		*(u32 *)(drv_context->rx) = read_SSDR(reg);
> +		drv_context->rx += 4;
> +	}
> +
> +	return drv_context->rx == drv_context->rx_end;
> +}
> +
> +static bool chan_filter(struct dma_chan *chan, void *param)
> +{
> +	struct ssp_driver_context *drv_context =
> +		(struct ssp_driver_context *)param;
> +	bool ret = false;
> +
> +	if (!drv_context->dmac1)
> +		return ret;
> +
> +	if (chan->device->dev == &drv_context->dmac1->dev)
> +		ret = true;
> +
> +	return ret;
> +}
> +
> +/**
> + * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
> + * @drv_context:	Pointer to the private driver context
> + */
> +static void unmap_dma_buffers(struct ssp_driver_context *drv_context)
> +{
> +	struct device *dev = &drv_context->pdev->dev;
> +
> +	if (!drv_context->dma_mapped)
> +		return;
> +	dma_unmap_single(dev, drv_context->rx_dma, drv_context->len,
> +		PCI_DMA_FROMDEVICE);
> +	dma_unmap_single(dev, drv_context->tx_dma, drv_context->len,
> +		PCI_DMA_TODEVICE);
> +	drv_context->dma_mapped = 0;
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
> + * @arg:	Pointer to the data provided at callback registration
> + *
> + * This function is set as callback for both RX and TX DMA transfers. The
> + * RX or TX 'done' flag is set acording to the direction of the ended
> + * transfer. Then, if both RX and TX flags are set, it means that the
> + * transfer job is completed.
> + */
> +static void intel_mid_ssp_spi_dma_done(void *arg)
> +{
> +	struct callback_param *cb_param = (struct callback_param *)arg;
> +	struct ssp_driver_context *drv_context = cb_param->drv_context;
> +	struct device *dev = &drv_context->pdev->dev;
> +	void *reg = drv_context->ioaddr;
> +
> +	if (cb_param->direction == TX_DIRECTION)
> +		drv_context->txdma_done = 1;
> +	else
> +		drv_context->rxdma_done = 1;
> +
> +	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
> +		cb_param->direction, drv_context->rxdma_done,
> +		drv_context->txdma_done);
> +
> +	if (drv_context->txdma_done && drv_context->rxdma_done) {
> +		/* Clear Status Register */
> +		write_SSSR(drv_context->clear_sr, reg);
> +		dev_dbg(dev, "DMA done\n");
> +		/* Disable Triggers to DMA or to CPU*/
> +		disable_triggers(drv_context);
> +		unmap_dma_buffers(drv_context);
> +
> +		queue_work(drv_context->dma_wq, &drv_context->complete_work);
> +	}
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_init() - Initialize DMA
> + * @drv_context:	Pointer to the private driver context
> + *
> + * This function is called at driver setup phase to allocate DMA
> + * ressources.
> + */
> +static void intel_mid_ssp_spi_dma_init(struct ssp_driver_context *drv_context)
> +{
> +	struct intel_mid_dma_slave *rxs, *txs;
> +	struct dma_slave_config *ds;
> +	dma_cap_mask_t mask;
> +	struct device *dev = &drv_context->pdev->dev;
> +	unsigned int device_id;
> +
> +	/* Configure RX channel parameters */
> +	rxs = &drv_context->dmas_rx;
> +	ds = &rxs->dma_slave;
> +
> +	ds->direction = DMA_FROM_DEVICE;
> +	rxs->hs_mode = LNW_DMA_HW_HS;
> +	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
> +	ds->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
> +	ds->src_addr_width = drv_context->n_bytes;
> +
> +	/* Use a DMA burst according to the FIFO thresholds */
> +	if (drv_context->rx_fifo_threshold == 8) {
> +		ds->src_maxburst = 8;
> +		ds->dst_maxburst = 8;
> +	} else if (drv_context->rx_fifo_threshold == 4) {
> +		ds->src_maxburst = 4;
> +		ds->dst_maxburst = 4;
> +	} else {
> +		ds->src_maxburst = 1;
> +		ds->dst_maxburst = 1;
> +	}
> +
> +	/* Configure TX channel parameters */
> +	txs = &drv_context->dmas_tx;
> +	ds = &txs->dma_slave;
> +
> +	ds->direction = DMA_TO_DEVICE;
> +	txs->hs_mode = LNW_DMA_HW_HS;
> +	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
> +	ds->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
> +	ds->dst_addr_width = drv_context->n_bytes;
> +
> +	/* Use a DMA burst according to the FIFO thresholds */
> +	if (drv_context->rx_fifo_threshold == 8) {
> +		ds->src_maxburst = 8;
> +		ds->dst_maxburst = 8;
> +	} else if (drv_context->rx_fifo_threshold == 4) {
> +		ds->src_maxburst = 4;
> +		ds->dst_maxburst = 4;
> +	} else {
> +		ds->src_maxburst = 1;
> +		ds->dst_maxburst = 1;
> +	}

Other than the ->direction and ->cfg_mode settings, these two blocks
are identical.  Maybe this could be simplified?  Also, the maxburst
values are the same for both dst_ and src_ on both halves.  It can
probably be calculated once and used to initialize both dma
structures.

> +
> +	/* Nothing more to do if already initialized */
> +	if (drv_context->dma_initialized)
> +		return;
> +
> +	/* Use DMAC1 */
> +	if (drv_context->quirks & QUIRKS_PLATFORM_MRST)
> +		device_id = PCI_MRST_DMAC1_ID;
> +	else
> +		device_id = PCI_MDFL_DMAC1_ID;
> +
> +	drv_context->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL,
> +							device_id, NULL);
> +
> +	if (!drv_context->dmac1) {
> +		dev_err(dev, "Can't find DMAC1");
> +		return;
> +	}
> +
> +	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
> +		drv_context->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
> +				2 * MAX_SPI_TRANSFER_SIZE);
> +		if (drv_context->virt_addr_sram_rx)
> +			drv_context->virt_addr_sram_tx =
> +				drv_context->virt_addr_sram_rx +
> +				MAX_SPI_TRANSFER_SIZE;
> +		else
> +			dev_err(dev, "Virt_addr_sram_rx is null\n");
> +	}
> +
> +	/* 1. Allocate rx channel */
> +	dma_cap_zero(mask);
> +	dma_cap_set(DMA_MEMCPY, mask);
> +	dma_cap_set(DMA_SLAVE, mask);
> +
> +	drv_context->rxchan = dma_request_channel(mask, chan_filter,
> +		drv_context);
> +	if (!drv_context->rxchan)
> +		goto err_exit;
> +
> +	drv_context->rxchan->private = rxs;
> +
> +	/* 2. Allocate tx channel */
> +	dma_cap_set(DMA_SLAVE, mask);
> +	dma_cap_set(DMA_MEMCPY, mask);
> +
> +	drv_context->txchan = dma_request_channel(mask, chan_filter,
> +		drv_context);
> +
> +	if (!drv_context->txchan)
> +		goto free_rxchan;
> +	else
> +		drv_context->txchan->private = txs;
> +
> +	/* set the dma done bit to 1 */
> +	drv_context->txdma_done = 1;
> +	drv_context->rxdma_done = 1;
> +
> +	drv_context->tx_param.drv_context  = drv_context;
> +	drv_context->tx_param.direction = TX_DIRECTION;
> +	drv_context->rx_param.drv_context  = drv_context;
> +	drv_context->rx_param.direction = RX_DIRECTION;
> +
> +	drv_context->dma_initialized = 1;
> +
> +	return;
> +
> +free_rxchan:
> +	dma_release_channel(drv_context->rxchan);
> +err_exit:
> +	dev_err(dev, "Error : DMA Channel Not available\n");
> +
> +	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
> +		iounmap(drv_context->virt_addr_sram_rx);
> +
> +	pci_dev_put(drv_context->dmac1);
> +	return;
> +}
> +
> +/**
> + * intel_mid_ssp_spi_dma_exit() - Release DMA ressources
> + * @drv_context:	Pointer to the private driver context
> + */
> +static void intel_mid_ssp_spi_dma_exit(struct ssp_driver_context *drv_context)
> +{
> +	dma_release_channel(drv_context->txchan);
> +	dma_release_channel(drv_context->rxchan);
> +
> +	if (drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
> +		iounmap(drv_context->virt_addr_sram_rx);
> +
> +	pci_dev_put(drv_context->dmac1);
> +}
> +
> +/**
> + * dma_transfer() - Initiate a DMA transfer
> + * @drv_context:	Pointer to the private driver context
> + */
> +static void dma_transfer(struct ssp_driver_context *drv_context)
> +{
> +	dma_addr_t ssdr_addr;
> +	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
> +	struct dma_chan *txchan, *rxchan;
> +	enum dma_ctrl_flags flag;
> +	struct device *dev = &drv_context->pdev->dev;
> +
> +	/* get Data Read/Write address */
> +	ssdr_addr = (dma_addr_t)(drv_context->paddr + 0x10);
> +
> +	if (drv_context->tx_dma)
> +		drv_context->txdma_done = 0;
> +
> +	if (drv_context->rx_dma)
> +		drv_context->rxdma_done = 0;
> +
> +	/* 2. prepare the RX dma transfer */
> +	txchan = drv_context->txchan;
> +	rxchan = drv_context->rxchan;
> +
> +	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
> +
> +	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
> +		/* Since the DMA is configured to do 32bits access */
> +		/* to/from the DDR, the DMA transfer size must be  */
> +		/* a multiple of 4 bytes                           */
> +		drv_context->len_dma_rx = drv_context->len & ~(4 - 1);
> +		drv_context->len_dma_tx = drv_context->len_dma_rx;
> +
> +		/* In Rx direction, TRAIL Bytes are handled by memcpy */
> +		if (drv_context->rx_dma &&
> +			(drv_context->len_dma_rx >
> +			drv_context->rx_fifo_threshold * drv_context->n_bytes))
> +			drv_context->len_dma_rx =
> +					TRUNCATE(drv_context->len_dma_rx,
> +					drv_context->rx_fifo_threshold *
> +					drv_context->n_bytes);
> +		else if (!drv_context->rx_dma)
> +			dev_err(dev, "ERROR : rx_dma is null\r\n");
> +	} else {
> +		/* TRAIL Bytes are handled by DMA */
> +		if (drv_context->rx_dma) {
> +			drv_context->len_dma_rx = drv_context->len;
> +			drv_context->len_dma_tx = drv_context->len;
> +		} else {
> +			dev_err(dev, "ERROR : drv_context->rx_dma is null!\n");
> +		}
> +	}
> +
> +	rxdesc = rxchan->device->device_prep_dma_memcpy
> +		(rxchan,				/* DMA Channel */

Keep leading '(' on the line with the function name please.

> +		drv_context->rx_dma,			/* DAR */
> +		ssdr_addr,				/* SAR */
> +		drv_context->len_dma_rx,		/* Data Length */
> +		flag);					/* Flag */
> +
> +	if (rxdesc) {
> +		rxdesc->callback = intel_mid_ssp_spi_dma_done;
> +		rxdesc->callback_param = &drv_context->rx_param;
> +	} else {
> +		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%d)\n",
> +			drv_context->len_dma_rx);
> +		drv_context->rxdma_done = 1;
> +	}
> +
> +	/* 3. prepare the TX dma transfer */
> +	if (drv_context->tx_dma) {
> +		txdesc = txchan->device->device_prep_dma_memcpy
> +		(txchan,				/* DMA Channel */

Ditto, and indent the parameters.  The comments at the end of the line
aren't at all helpful (they state obvious facts instead of talking
about what the driver is /doing/).

> +		ssdr_addr,				/* DAR */
> +		drv_context->tx_dma,			/* SAR */
> +		drv_context->len_dma_tx,		/* Data Length */
> +		flag);					/* Flag */
> +		if (txdesc) {
> +			txdesc->callback = intel_mid_ssp_spi_dma_done;
> +			txdesc->callback_param = &drv_context->tx_param;
> +		} else {
> +			dev_dbg(dev, "txdesc is null! (len_dma_tx:%d)\n",
> +				drv_context->len_dma_tx);
> +			drv_context->txdma_done = 1;
> +		}
> +	} else {
> +		dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
> +		return;
> +	}

Reverse the logic so that the else block comes first and then the if
block can lose an indentation level.  Like this:

+	if (!drv_context->tx_dma) {
+		dev_err(dev, "ERROR : drv_context->tx_dma is null!\n");
+		return;
+	}
+	txdesc = txchan->device->device_prep_dma_memcpy(...);
+	...

> +
> +	dev_info(dev, "DMA transfer len:%d len_dma_tx:%d len_dma_rx:%d\n",
> +		drv_context->len, drv_context->len_dma_tx,
> +		drv_context->len_dma_rx);
> +
> +	if (rxdesc || txdesc) {
> +		if (rxdesc) {
> +			dev_dbg(dev, "Firing DMA RX channel\n");
> +			rxdesc->tx_submit(rxdesc);
> +		}
> +		if (txdesc) {
> +			dev_dbg(dev, "Firing DMA TX channel\n");
> +			txdesc->tx_submit(txdesc);
> +		}
> +	} else {
> +		struct callback_param cb_param;
> +		cb_param.drv_context = drv_context;
> +		dev_dbg(dev, "Bypassing DMA transfer\n");
> +		intel_mid_ssp_spi_dma_done(&cb_param);
> +	}

Ditto here, the logic would read better if it was reversed so the else
block comes first.

> +}
> +
> +/**
> + * map_dma_buffers() - Map DMA buffer before a transfer
> + * @drv_context:	Pointer to the private drivzer context
> + */
> +static int map_dma_buffers(struct ssp_driver_context *drv_context)
> +{
> +	struct device *dev = &drv_context->pdev->dev;
> +
> +	if (unlikely(drv_context->dma_mapped)) {
> +		dev_err(dev, "ERROR : DMA buffers already mapped\n");
> +		return 0;
> +	}
> +	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
> +		/* Copy drv_context->tx into sram_tx */
> +		memcpy_toio(drv_context->virt_addr_sram_tx, drv_context->tx,
> +			drv_context->len);
> +#ifdef DUMP_RX
> +		dump_trailer(&drv_context->pdev->dev, drv_context->tx,
> +			drv_context->len, 16);
> +#endif
> +		drv_context->rx_dma = SRAM_RX_ADDR;
> +		drv_context->tx_dma = SRAM_TX_ADDR;
> +	} else {
> +		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
> +		if (unlikely(drv_context->dma_mapped))
> +			return 1;
> +
> +		drv_context->tx_dma =
> +			dma_map_single(dev, drv_context->tx, drv_context->len,
> +				PCI_DMA_TODEVICE);
> +		if (unlikely(dma_mapping_error(dev, drv_context->tx_dma))) {
> +			dev_err(dev, "ERROR : tx dma mapping failed\n");
> +			return 0;
> +		}
> +
> +		drv_context->rx_dma =
> +			dma_map_single(dev, drv_context->rx, drv_context->len,
> +				PCI_DMA_FROMDEVICE);
> +		if (unlikely(dma_mapping_error(dev, drv_context->rx_dma))) {
> +			dma_unmap_single(dev, drv_context->tx_dma,
> +				drv_context->len, DMA_TO_DEVICE);
> +			dev_err(dev, "ERROR : rx dma mapping failed\n");
> +			return 0;
> +		}
> +	}
> +	return 1;
> +}
> +
> +/**
> + * drain_trail() - Handle trailing bytes of a transfer
> + * @drv_context:	Pointer to the private driver context
> + *
> + * This function handles the trailing bytes of a transfer for the case
> + * they are not handled by the DMA.
> + */
> +void drain_trail(struct ssp_driver_context *drv_context)
> +{
> +	struct device *dev = &drv_context->pdev->dev;
> +	void *reg = drv_context->ioaddr;
> +
> +	if (drv_context->len != drv_context->len_dma_rx) {
> +		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
> +			read_SSSR(reg));
> +		drv_context->rx += drv_context->len_dma_rx;
> +		drv_context->tx += drv_context->len_dma_tx;
> +
> +		while ((drv_context->tx != drv_context->tx_end) ||
> +			(drv_context->rx != drv_context->rx_end)) {
> +			drv_context->read(drv_context);
> +			drv_context->write(drv_context);
> +		}
> +	}
> +}
> +
> +/**
> + * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
> + * @drv_context:	Pointer to the private driver context
> + */
> +static void sram_to_ddr_cpy(struct ssp_driver_context *drv_context)
> +{
> +	u32 length = drv_context->len;
> +
> +	if ((drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> +		&& (drv_context->len > drv_context->rx_fifo_threshold *
> +		drv_context->n_bytes))
> +		length = TRUNCATE(drv_context->len,
> +			drv_context->rx_fifo_threshold * drv_context->n_bytes);
> +
> +	memcpy_fromio(drv_context->rx, drv_context->virt_addr_sram_rx, length);
> +}
> +
/*
 * int_transfer_complete() - Finalize a DMA transfer
 * @drv_context:	Pointer to the private driver context
 *
 * Invoked from the completion workqueue: releases the PM_QOS latency
 * constraint, copies data out of SRAM when the slave-mode quirk is set,
 * drains trailing bytes (or stops the timeout), then fills in the
 * message status/length and calls the caller's completion callback.
 */
static void int_transfer_complete(struct ssp_driver_context *drv_context)
{
	void *reg = drv_context->ioaddr;
	struct spi_message *msg;
	struct device *dev = &drv_context->pdev->dev;

	/* Transfer done: drop the latency constraint taken for the DMA */
	if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
		pm_qos_update_request(&drv_context->pm_qos_req,
					PM_QOS_DEFAULT_VALUE);

	/* Slave-mode workaround: data was DMAed into SRAM, copy it to DDR */
	if (unlikely(drv_context->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
		sram_to_ddr_cpy(drv_context);

	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL))
		drain_trail(drv_context);
	else
		/* Stop getting Time Outs */
		write_SSTO(0, reg);

	drv_context->cur_msg->status = 0;
	drv_context->cur_msg->actual_length = drv_context->len;

#ifdef DUMP_RX
	dump_trailer(dev, drv_context->rx, drv_context->len, 16);
#endif

/*
 * Review note (Grant Likely): if a #else clause at dump_trailer()'s
 * definition point provides an empty stub, the #ifdef/#endif above can
 * be dropped here.
 */

	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
	msg = drv_context->cur_msg;
	if (likely(msg->complete))
		msg->complete(msg->context);
}
> +
> +static void int_transfer_complete_work(struct work_struct *work)
> +{
> +	struct ssp_driver_context *drv_context = container_of(work,
> +				struct ssp_driver_context, complete_work);
> +
> +	int_transfer_complete(drv_context);
> +}
> +
> +static void poll_transfer_complete(struct ssp_driver_context *drv_context)
> +{
> +	struct spi_message *msg;
> +
> +	/* Update total byte transfered return count actual bytes read */
> +	drv_context->cur_msg->actual_length +=
> +		drv_context->len - (drv_context->rx_end - drv_context->rx);
> +
> +	drv_context->cur_msg->status = 0;
> +
> +	msg = drv_context->cur_msg;
> +	if (likely(msg->complete))
> +		msg->complete(msg->context);
> +}
> +
/**
 * ssp_int() - Interrupt handler
 * @irq:	Interrupt number
 * @dev_id:	Driver context, as passed to request_irq()
 *
 * The SSP interrupt is not used for transfers, which are handled by
 * DMA or polling: only under/over run conditions are caught here to
 * detect broken transfers.
 */
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct ssp_driver_context *drv_context = dev_id;
	void *reg = drv_context->ioaddr;
	struct device *dev = &drv_context->pdev->dev;
	u32 status = read_SSSR(reg);

	/* The IRQ line is shared (IRQF_SHARED): if none of the status   */
	/* bits we armed is set, the interrupt is for another device.    */
	if (likely(!(status & drv_context->mask_sr)))
		return IRQ_NONE;

	if (status & SSSR_ROR || status & SSSR_TUR) {
		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
		WARN_ON(1);
		if (status & SSSR_ROR)
			dev_err(dev, "we have Overrun\n");
		if (status & SSSR_TUR)
			dev_err(dev, "we have Underrun\n");
	}

	/* We can fall here when not using DMA mode */
	if (!drv_context->cur_msg) {
		disable_interface(drv_context);
		disable_triggers(drv_context);
	}
	/* clear status register */
	write_SSSR(drv_context->clear_sr, reg);
	return IRQ_HANDLED;
}
> +
> +static void poll_transfer(unsigned long data)
> +{
> +	struct ssp_driver_context *drv_context =
> +		(struct ssp_driver_context *)data;
> +
> +	if (drv_context->tx)
> +		while (drv_context->tx != drv_context->tx_end) {
> +			drv_context->write(drv_context);
> +			drv_context->read(drv_context);
> +		}
> +
> +	while (!drv_context->read(drv_context))
> +		cpu_relax();
> +
> +	poll_transfer_complete(drv_context);
> +}
> +
> +/**
> + * start_bitbanging() - Clock synchronization by bit banging
> + * @drv_context:	Pointer to private driver context
> + *
> + * This clock synchronization will be removed as soon as it is
> + * handled by the SCU.
> + */
> +static void start_bitbanging(struct ssp_driver_context *drv_context)
> +{
> +	u32 sssr;
> +	u32 count = 0;
> +	u32 cr0;
> +	void *i2c_reg = drv_context->I2C_ioaddr;
> +	struct device *dev = &drv_context->pdev->dev;
> +	void *reg = drv_context->ioaddr;
> +	struct chip_data *chip = spi_get_ctldata(drv_context->cur_msg->spi);
> +	cr0 = chip->cr0;
> +
> +	dev_warn(dev, "In %s : Starting bit banging\n",\
> +		__func__);
> +	if (read_SSSR(reg) & SSP_NOT_SYNC)
> +		dev_warn(dev, "SSP clock desynchronized.\n");
> +	if (!(read_SSCR0(reg) & SSCR0_SSE))
> +		dev_warn(dev, "in SSCR0, SSP disabled.\n");
> +
> +	dev_dbg(dev, "SSP not ready, start CLK sync\n");
> +
> +	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
> +	write_SSPSP(0x02010007, reg);
> +
> +	write_SSTO(chip->timeout, reg);
> +	write_SSCR0(cr0, reg);
> +
> +	/*
> +	*  This routine uses the DFx block to override the SSP inputs
> +	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
> +	*  we have to generate the clock to clear busy.
> +	*/
> +	write_I2CDATA(0x3, i2c_reg);
> +	udelay(I2C_ACCESS_USDELAY);
> +	write_I2CCTRL(0x01070034, i2c_reg);
> +	udelay(I2C_ACCESS_USDELAY);
> +	write_I2CDATA(0x00000099, i2c_reg);
> +	udelay(I2C_ACCESS_USDELAY);
> +	write_I2CCTRL(0x01070038, i2c_reg);
> +	udelay(I2C_ACCESS_USDELAY);
> +	sssr = read_SSSR(reg);
> +
> +	/* Bit bang the clock until CSS clears */
> +	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
> +		write_I2CDATA(0x2, i2c_reg);
> +		udelay(I2C_ACCESS_USDELAY);
> +		write_I2CCTRL(0x01070034, i2c_reg);
> +		udelay(I2C_ACCESS_USDELAY);
> +		write_I2CDATA(0x3, i2c_reg);
> +		udelay(I2C_ACCESS_USDELAY);
> +		write_I2CCTRL(0x01070034, i2c_reg);
> +		udelay(I2C_ACCESS_USDELAY);
> +		sssr = read_SSSR(reg);
> +		count++;
> +	}
> +	if (count >= MAX_BITBANGING_LOOP)
> +		dev_err(dev, "ERROR in %s : infinite loop \
> +			on bit banging. Aborting\n", __func__);
> +
> +	dev_dbg(dev, "---Bit bang count=%d\n", count);
> +
> +	write_I2CDATA(0x0, i2c_reg);
> +	udelay(I2C_ACCESS_USDELAY);
> +	write_I2CCTRL(0x01070038, i2c_reg);
> +}
> +
/**
 * ssp_get_clk_div() - Compute the serial clock divisor for a bit rate
 * @speed:	Requested bit rate in Hz
 *
 * Divides the 100000000 Hz base clock by @speed with a minimum divisor
 * of 4, and returns (divisor - 1), the value setup() shifts into SSCR0
 * bits 8+ (the SCR field).
 *
 * Fix: the original divided by @speed unconditionally, so a device
 * advertising max_speed_hz == 0 caused a division by zero.
 */
static unsigned int ssp_get_clk_div(int speed)
{
	int div;

	/* Guard against a division by zero on a bogus requested speed */
	if (speed <= 0)
		speed = 1;

	div = 100000000 / speed;
	if (div < 4)
		div = 4;	/* hardware minimum divisor */

	return div - 1;
}
> +
/**
 * transfer() - Start a SPI transfer
 * @spi:	Pointer to the spi_device struct
 * @msg:	Pointer to the spi_message struct
 *
 * Handles only the first spi_transfer of the message (the protocol
 * module has to control the out-of-band signaling).  The transfer is
 * started immediately, either via DMA or via the polling tasklet;
 * completion is reported through msg->complete().  Always returns 0;
 * errors are carried in msg->status.
 */
static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ssp_driver_context *drv_context = \
	spi_master_get_devdata(spi->master);
	struct chip_data *chip = NULL;
	struct spi_transfer *transfer = NULL;
	void *reg = drv_context->ioaddr;
	u32 cr1;
	struct device *dev = &drv_context->pdev->dev;
	chip = spi_get_ctldata(msg->spi);

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	drv_context->cur_msg = msg;

	/* We handle only one transfer message since the protocol module has to
	   control the out of band signaling. */
	transfer = list_entry(msg->transfers.next,
					struct spi_transfer,
					transfer_list);

	/* Check transfer length */
	if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
		(transfer->len == 0))) {
		dev_warn(dev, "transfer length null or greater than %d\n",
			MAX_SPI_TRANSFER_SIZE);
		dev_warn(dev, "length = %d\n", transfer->len);
		msg->status = -EINVAL;

		if (msg->complete)
			msg->complete(msg->context);

		return 0;
	}

	/* Flush any remaining data (in case of failed previous transfer) */
	flush(drv_context);

	drv_context->tx  = (void *)transfer->tx_buf;
	drv_context->rx  = (void *)transfer->rx_buf;

	/*
	 * Review note (Grant Likely): tx_buf and rx_buf are already void *
	 * (tx_buf is const void *), so these casts should be unnecessary;
	 * drv_context->tx should probably be const as well.
	 */

	drv_context->len = transfer->len;
	drv_context->write = chip->write;
	drv_context->read = chip->read;

	if (likely(chip->dma_enabled)) {
		drv_context->dma_mapped = map_dma_buffers(drv_context);
		if (unlikely(!drv_context->dma_mapped))
			return 0;
	} else {
		/* PIO mode: substitute no-op helpers for absent buffers */
		drv_context->write = drv_context->tx ?
			chip->write : null_writer;
		drv_context->read  = drv_context->rx ?
			chip->read : null_reader;
	}
	drv_context->tx_end = drv_context->tx + transfer->len;
	drv_context->rx_end = drv_context->rx + transfer->len;

	/* Clear status  */
	write_SSSR(drv_context->clear_sr, reg);

	/* setup the CR1 control register */
	cr1 = chip->cr1 | drv_context->cr1_sig;

	if (likely(drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
		/*
		 * In case of len smaller than burst size, adjust the RX
		 * threshold. All other cases will use the default threshold
		 * value. The RX fifo threshold must be aligned with the DMA
		 * RX transfer size, which may be limited to a multiple of 4
		 * bytes due to 32bits DDR access.
		 * (Review note, Grant Likely: use continuous comment blocks
		 * like this one throughout the whole patch.)
		 */
		if  (drv_context->len / drv_context->n_bytes <=
			drv_context->rx_fifo_threshold) {
			u32 rx_fifo_threshold;

			rx_fifo_threshold = (drv_context->len & ~(4 - 1)) /
				drv_context->n_bytes;
			cr1 &= ~(SSCR1_RFT);
			cr1 |= SSCR1_RxTresh(rx_fifo_threshold)
					& SSCR1_RFT;
		} else {
			write_SSTO(chip->timeout, reg);
		}
	}

	dev_dbg(dev,
		"transfer len:%d  n_bytes:%d  cr0:%x  cr1:%x",
		drv_context->len, drv_context->n_bytes, chip->cr0, cr1);

	/* first set CR1 */
	write_SSCR1(cr1, reg);

	/* Do bitbanging only if SSP not-enabled or not-synchronized */
	if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
		(!(read_SSCR0(reg) & SSCR0_SSE))) &&
		(drv_context->quirks & QUIRKS_BIT_BANGING))) {
			start_bitbanging(drv_context);
	} else {
		/* (re)start the SSP */
		write_SSCR0(chip->cr0, reg);
	}

	if (likely(chip->dma_enabled)) {
		/* Tighten the wakeup latency while the DMA is in flight */
		if (unlikely(drv_context->quirks & QUIRKS_USE_PM_QOS))
			pm_qos_update_request(&drv_context->pm_qos_req,
				MIN_EXIT_LATENCY);
		dma_transfer(drv_context);
	} else {
		tasklet_schedule(&drv_context->poll_transfer);
	}

	return 0;
}
> +
> +/**
> + * setup() - Driver setup procedure
> + * @spi:	Pointeur to the spi_device struct
> + */
> +static int setup(struct spi_device *spi)
> +{
> +	struct intel_mid_ssp_spi_chip *chip_info = NULL;
> +	struct chip_data *chip;
> +	struct ssp_driver_context *drv_context =
> +		spi_master_get_devdata(spi->master);
> +	u32 tx_fifo_threshold;
> +	u32 burst_size;
> +	u32 clk_div;
> +
> +	if (!spi->bits_per_word)
> +		spi->bits_per_word = DFLT_BITS_PER_WORD;
> +
> +	if ((spi->bits_per_word < MIN_BITS_PER_WORD
> +		|| spi->bits_per_word > MAX_BITS_PER_WORD))
> +		return -EINVAL;
> +
> +	chip = spi_get_ctldata(spi);
> +	if (!chip) {
> +		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
> +		if (!chip) {
> +			dev_err(&spi->dev,
> +			"failed setup: can't allocate chip data\n");
> +			return -ENOMEM;
> +		}
> +	}
> +	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
> +		spi->bits_per_word - 16 : spi->bits_per_word)
> +			| SSCR0_SSE
> +			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
> +
> +	/* protocol drivers may change the chip settings, so...  */
> +	/* if chip_info exists, use it                           */
> +	chip_info = spi->controller_data;
> +
> +	/* chip_info isn't always needed */
> +	chip->cr1 = 0;
> +	if (chip_info) {
> +		burst_size = chip_info->burst_size;
> +		if (burst_size > IMSS_FIFO_BURST_8)
> +			burst_size = DFLT_FIFO_BURST_SIZE;
> +
> +		chip->timeout = chip_info->timeout;
> +
> +		if (chip_info->enable_loopback)
> +			chip->cr1 |= SSCR1_LBM;
> +
> +		chip->dma_enabled = chip_info->dma_enabled;
> +
> +	} else {
> +		/* if no chip_info provided by protocol driver, */
> +		/* set default values                           */
> +		dev_info(&spi->dev, "setting default chip values\n");
> +
> +		burst_size = DFLT_FIFO_BURST_SIZE;
> +
> +		chip->dma_enabled = 1;
> +		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> +			chip->timeout = 0;
> +		else
> +			chip->timeout = DFLT_TIMEOUT_VAL;
> +	}
> +	/* Set FIFO thresholds according to burst_size */
> +	if (burst_size == IMSS_FIFO_BURST_8)
> +		drv_context->rx_fifo_threshold = 8;
> +	else if (burst_size == IMSS_FIFO_BURST_4)
> +		drv_context->rx_fifo_threshold = 4;
> +	else
> +		drv_context->rx_fifo_threshold = 1;
> +	tx_fifo_threshold = SPI_FIFO_SIZE - drv_context->rx_fifo_threshold;
> +	chip->cr1 |= (SSCR1_RxTresh(drv_context->rx_fifo_threshold) &
> +		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) &
> +		SSCR1_TFT);
> +
> +	drv_context->dma_mapped = 0;
> +
> +	/* setting phase and polarity. spi->mode comes from boardinfo */
> +	if ((spi->mode & SPI_CPHA) != 0)
> +		chip->cr1 |= SSCR1_SPH;
> +	if ((spi->mode & SPI_CPOL) != 0)
> +		chip->cr1 |= SSCR1_SPO;
> +
> +	if (drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
> +		/* set slave mode */
> +		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
> +	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
> +
> +	dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
> +		spi->bits_per_word,
> +		spi->mode & 0x3);
> +	if (spi->bits_per_word <= 8) {
> +		chip->n_bytes = 1;
> +		chip->read = u8_reader;
> +		chip->write = u8_writer;
> +	} else if (spi->bits_per_word <= 16) {
> +		chip->n_bytes = 2;
> +		chip->read = u16_reader;
> +		chip->write = u16_writer;
> +	} else if (spi->bits_per_word <= 32) {
> +		chip->cr0 |= SSCR0_EDSS;
> +		chip->n_bytes = 4;
> +		chip->read = u32_reader;
> +		chip->write = u32_writer;
> +	} else {
> +		dev_err(&spi->dev, "invalid wordsize\n");
> +		return -EINVAL;
> +	}
> +
> +	if ((drv_context->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
> +		chip->speed_hz = spi->max_speed_hz;
> +		clk_div = ssp_get_clk_div(chip->speed_hz);
> +		chip->cr0 |= clk_div << 8;
> +	}
> +	chip->bits_per_word = spi->bits_per_word;
> +
> +	spi_set_ctldata(spi, chip);
> +
> +	/* setup of drv_context members that will not change across transfers */
> +	drv_context->n_bytes = chip->n_bytes;
> +
> +	if (chip->dma_enabled) {
> +		intel_mid_ssp_spi_dma_init(drv_context);
> +		drv_context->cr1_sig  = SSCR1_TSRE | SSCR1_RSRE;
> +		drv_context->mask_sr  = SSSR_ROR | SSSR_TUR;
> +		if (drv_context->quirks & QUIRKS_DMA_USE_NO_TRAIL)
> +			drv_context->cr1_sig  |= SSCR1_TRAIL;
> +	} else {
> +		drv_context->cr1_sig = SSCR1_RIE | SSCR1_TIE | SSCR1_TINTE;
> +		drv_context->mask_sr = SSSR_RFS | SSSR_TFS |
> +				 SSSR_ROR | SSSR_TUR | SSSR_TINT;
> +	}
> +	drv_context->clear_sr = SSSR_TUR  | SSSR_ROR | SSSR_TINT;
> +
> +	return 0;
> +}
> +
> +/**
> + * cleanup() - Driver cleanup procedure
> + * @spi:	Pointer to the spi_device struct
> + */
> +static void cleanup(struct spi_device *spi)
> +{
> +	struct chip_data *chip = spi_get_ctldata(spi);
> +	struct ssp_driver_context *drv_context =
> +		spi_master_get_devdata(spi->master);
> +
> +	if (drv_context->dma_initialized)
> +		intel_mid_ssp_spi_dma_exit(drv_context);
> +
> +	/* Remove the PM_QOS request */
> +	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> +		pm_qos_remove_request(&drv_context->pm_qos_req);
> +
> +	kfree(chip);
> +	spi_set_ctldata(spi, NULL);
> +}
> +
> +/**
> + * intel_mid_ssp_spi_probe() - Driver probe procedure
> + * @pdev:	Pointer to the pci_dev struct
> + * @ent:	Pointer to the pci_device_id struct
> + */
> +static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
> +	const struct pci_device_id *ent)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct spi_master *master;
> +	struct ssp_driver_context *drv_context = 0;
> +	int status;
> +	u32 iolen = 0;
> +	u8 ssp_cfg;
> +	int pos;
> +	void __iomem *syscfg_ioaddr;
> +	unsigned long syscfg;
> +
> +	/* Check if the SSP we are probed for has been allocated */
> +	/* to operate as SPI. This information is retreived from */
> +	/* the field adid of the Vendor-Specific PCI capability  */
> +	/* which is used as a configuration register.            */
> +	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
> +	if (pos > 0) {
> +		pci_read_config_byte(pdev,
> +			pos + VNDR_CAPABILITY_ADID_OFFSET,
> +			&ssp_cfg);
> +	} else {
> +		dev_info(dev, "No Vendor Specific PCI capability\n");
> +		goto err_abort_probe;
> +	}
> +	if (SSP_CFG_GET_MODE(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
> +		dev_info(dev, "Unsupported SSP mode (%02xh)\n",
> +			ssp_cfg);
> +		goto err_abort_probe;
> +	}
> +
> +	dev_info(dev, "found PCI SSP controller"
> +		" (ID: %04xh:%04xh cfg: %02xh)\n",
> +		pdev->vendor, pdev->device, ssp_cfg);
> +
> +	status = pci_enable_device(pdev);
> +	if (status)
> +		return status;
> +
> +	/* Allocate Slave with space for drv_context and null dma buffer */
> +	master = spi_alloc_master(dev, sizeof(struct ssp_driver_context));
> +
> +	if (!master) {
> +		dev_err(dev, "cannot alloc spi_slave\n");
> +		status = -ENOMEM;
> +		goto err_free_0;
> +	}
> +
> +	drv_context = spi_master_get_devdata(master);
> +	drv_context->master = master;
> +
> +	drv_context->pdev = pdev;
> +	drv_context->quirks = ent->driver_data;
> +
> +	/* Set platform & configuration quirks */
> +	if (drv_context->quirks & QUIRKS_PLATFORM_MRST) {
> +		/* Apply bit banging workarround on MRST */
> +		drv_context->quirks |= QUIRKS_BIT_BANGING;
> +		/* MRST slave mode workarrounds */
> +		if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +			drv_context->quirks |=
> +				QUIRKS_USE_PM_QOS |
> +				QUIRKS_SRAM_ADDITIONAL_CPY;
> +	}
> +	drv_context->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
> +	if (SSP_CFG_IS_SPI_SLAVE(ssp_cfg))
> +		drv_context->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
> +
> +	master->mode_bits = SPI_CPOL | SPI_CPHA;
> +	master->bus_num = SSP_CFG_GET_SPI_BUS_NB(ssp_cfg);
> +	master->num_chipselect = 1;
> +	master->cleanup = cleanup;
> +	master->setup = setup;
> +	master->transfer = transfer;
> +	drv_context->dma_wq = create_workqueue("intel_mid_ssp_spi");
> +	INIT_WORK(&drv_context->complete_work, int_transfer_complete_work);
> +
> +	drv_context->dma_initialized = 0;
> +
> +	/* get basic io resource and map it */
> +	drv_context->paddr = pci_resource_start(pdev, 0);
> +	iolen = pci_resource_len(pdev, 0);
> +
> +	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
> +	if (status)
> +		goto err_free_1;
> +
> +	drv_context->ioaddr =
> +		ioremap_nocache(drv_context->paddr, iolen);
> +	if (!drv_context->ioaddr) {
> +		status = -ENOMEM;
> +		goto err_free_2;
> +	}
> +	dev_dbg(dev, "paddr = : %08lx", drv_context->paddr);
> +	dev_dbg(dev, "ioaddr = : %p\n", drv_context->ioaddr);
> +	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
> +	dev_dbg(dev, "quirks = : %08lx\n", drv_context->quirks);
> +
> +	if (drv_context->quirks & QUIRKS_BIT_BANGING) {
> +		/* Bit banging on the clock is done through */
> +		/* DFT which is available through I2C.      */
> +		/* get base address of I2C_Serbus registers */
> +		drv_context->I2C_paddr = 0xff12b000;
> +		drv_context->I2C_ioaddr =
> +			ioremap_nocache(drv_context->I2C_paddr, 0x10);
> +		if (!drv_context->I2C_ioaddr) {
> +			status = -ENOMEM;
> +			goto err_free_3;
> +		}
> +	}
> +
> +	/* Attach to IRQ */
> +	drv_context->irq = pdev->irq;
> +	status = request_irq(drv_context->irq, ssp_int, IRQF_SHARED,
> +		"intel_mid_ssp_spi", drv_context);
> +	if (status < 0) {
> +		dev_err(&pdev->dev, "can not get IRQ\n");
> +		goto err_free_4;
> +	}
> +
> +	if (drv_context->quirks & QUIRKS_PLATFORM_MDFL) {
> +		/* get base address of DMA selector. */
> +		syscfg = drv_context->paddr - SYSCFG;
> +		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
> +		if (!syscfg_ioaddr) {
> +			status = -ENOMEM;
> +			goto err_free_5;
> +		}
> +		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
> +	}
> +
> +	tasklet_init(&drv_context->poll_transfer, poll_transfer,
> +		(unsigned long)drv_context);
> +
> +	/* Register with the SPI framework */
> +	dev_info(dev, "register with SPI framework (bus spi%d)\n",
> +		master->bus_num);
> +
> +	status = spi_register_master(master);
> +
> +	if (status != 0) {
> +		dev_err(dev, "problem registering spi\n");
> +		goto err_free_5;
> +	}
> +
> +	pci_set_drvdata(pdev, drv_context);
> +
> +	/* Create the PM_QOS request */
> +	if (drv_context->quirks & QUIRKS_USE_PM_QOS)
> +		pm_qos_add_request(&drv_context->pm_qos_req,
> +		PM_QOS_CPU_DMA_LATENCY,
> +		PM_QOS_DEFAULT_VALUE);
> +
> +	return status;
> +
> +err_free_5:
> +	free_irq(drv_context->irq, drv_context);
> +err_free_4:
> +	iounmap(drv_context->I2C_ioaddr);
> +err_free_3:
> +	iounmap(drv_context->ioaddr);
> +err_free_2:
> +	pci_release_region(pdev, 0);
> +err_free_1:
> +	spi_master_put(master);
> +err_free_0:
> +	pci_disable_device(pdev);
> +
> +	return status;
> +err_abort_probe:
> +	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
> +		pdev->vendor, pdev->device);
> +	return -ENODEV;
> +}
> +
> +/**
> + * intel_mid_ssp_spi_remove() - driver remove procedure
> + * @pdev:	Pointer to the pci_dev struct
> + */
> +static void __devexit intel_mid_ssp_spi_remove(struct pci_dev *pdev)
> +{
> +	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
> +
> +	if (!drv_context)
> +		return;
> +
> +	/* Release IRQ */
> +	free_irq(drv_context->irq, drv_context);
> +
> +	iounmap(drv_context->ioaddr);
> +	if (drv_context->quirks & QUIRKS_BIT_BANGING)
> +		iounmap(drv_context->I2C_ioaddr);
> +
> +	/* disconnect from the SPI framework */
> +	spi_unregister_master(drv_context->master);
> +
> +	pci_set_drvdata(pdev, NULL);
> +	pci_release_region(pdev, 0);
> +	pci_disable_device(pdev);
> +
> +	return;
> +}
> +
> +#ifdef CONFIG_PM
/**
 * intel_mid_ssp_spi_suspend() - Driver suspend procedure
 * @pdev:	Pointer to the pci_dev struct
 * @state:	pm_message_t
 *
 * NOTE(review): only the polling tasklet is disabled here; in-flight
 * DMA transfers and the SSP hardware itself are not quiesced and no
 * register state is saved -- confirm this is sufficient for the
 * platform's suspend flow.
 */
static int intel_mid_ssp_spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
	dev_dbg(&pdev->dev, "suspend\n");

	tasklet_disable(&drv_context->poll_transfer);

	return 0;
}
> +
/**
 * intel_mid_ssp_spi_resume() - Driver resume procedure
 * @pdev:	Pointer to the pci_dev struct
 *
 * Re-enables the polling tasklet that the suspend callback disabled.
 */
static int intel_mid_ssp_spi_resume(struct pci_dev *pdev)
{
	struct ssp_driver_context *drv_context = pci_get_drvdata(pdev);
	dev_dbg(&pdev->dev, "resume\n");

	tasklet_enable(&drv_context->poll_transfer);

	return 0;
}
> +#else
> +#define intel_mid_ssp_spi_suspend NULL
> +#define intel_mid_ssp_spi_resume NULL
> +#endif /* CONFIG_PM */
> +
> +
/*
 * PCI IDs handled by this driver; driver_data carries the platform
 * quirk flags consumed in probe.
 *
 * NOTE(review): "const ... __devinitdata" puts a read-only object in a
 * writable init section -- __devinitconst (or DEFINE_PCI_DEVICE_TABLE)
 * is the usual form.  Also consider MODULE_DEVICE_TABLE(pci, pci_ids)
 * for module autoloading -- confirm intended.
 */
static const struct pci_device_id pci_ids[] __devinitdata = {
	/* MRST SSP0 */
	{ PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
	/* MDFL SSP0 */
	{ PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
	/* MDFL SSP1 */
	{ PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
	/* MDFL SSP3 */
	{ PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
	{},
};
> +
/* PCI driver glue; suspend/resume are NULL when CONFIG_PM is unset */
static struct pci_driver intel_mid_ssp_spi_driver = {
	.name =		DRIVER_NAME,
	.id_table =	pci_ids,
	.probe =	intel_mid_ssp_spi_probe,
	.remove =	__devexit_p(intel_mid_ssp_spi_remove),
	.suspend =	intel_mid_ssp_spi_suspend,
	.resume =	intel_mid_ssp_spi_resume,
};
> +
/* Module entry point: register the PCI driver.
 * NOTE(review): late_initcall (rather than module_init) is used,
 * presumably so that other subsystems (e.g. the DMA controller) are up
 * first -- confirm the dependency. */
static int __init intel_mid_ssp_spi_init(void)
{
	return pci_register_driver(&intel_mid_ssp_spi_driver);
}

late_initcall(intel_mid_ssp_spi_init);
> +
/* Module exit point: unregister the PCI driver */
static void __exit intel_mid_ssp_spi_exit(void)
{
	pci_unregister_driver(&intel_mid_ssp_spi_driver);
}

module_exit(intel_mid_ssp_spi_exit);
> +
> diff --git a/drivers/spi/intel_mid_ssp_spi.h b/drivers/spi/intel_mid_ssp_spi.h
> new file mode 100644
> index 0000000..aef2fa8
> --- /dev/null
> +++ b/drivers/spi/intel_mid_ssp_spi.h
> @@ -0,0 +1,321 @@
> +/*
> + *  Copyright (C) Intel 2009
> + *  Ken Mills <ken.k.mills@intel.com>
> + *  Sylvain Centelles <sylvain.centelles@intel.com>
> + *
> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> + *
> + *  This program is free software; you can redistribute it and/or modify
> + *  it under the terms of the GNU General Public License as published by
> + *  the Free Software Foundation; either version 2 of the License, or
> + *  (at your option) any later version.
> + *
> + *  This program is distributed in the hope that it will be useful,
> + *  but WITHOUT ANY WARRANTY; without even the implied warranty of
> + *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + *  GNU General Public License for more details.
> + *
> + *  You should have received a copy of the GNU General Public License
> + *  along with this program; if not, write to the Free Software
> + *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
> + *
> + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> + *
> + */
> +#ifndef INTEL_MID_SSP_SPI_H_
> +#define INTEL_MID_SSP_SPI_H_
> +
> +#define PCI_MRST_DMAC1_ID	0x0814
> +#define PCI_MDFL_DMAC1_ID	0x0827
> +
> +#define SSP_NOT_SYNC 0x400000
> +#define MAX_SPI_TRANSFER_SIZE 8192
> +#define MAX_BITBANGING_LOOP   10000
> +#define SPI_FIFO_SIZE 16
> +
> +/* PM QoS define */
> +#define MIN_EXIT_LATENCY 20
> +
> +/* SSP assignement configuration from PCI config */
> +#define SSP_CFG_GET_MODE(ssp_cfg)	((ssp_cfg) & 0x07)
> +#define SSP_CFG_GET_SPI_BUS_NB(ssp_cfg)	(((ssp_cfg) >> 3) & 0x07)
> +#define SSP_CFG_IS_SPI_SLAVE(ssp_cfg)	((ssp_cfg) & 0x40)
> +#define SSP_CFG_SPI_MODE_ID		1
> +/* adid field offset is 6 inside the vendor specific capability */
> +#define VNDR_CAPABILITY_ADID_OFFSET	6
> +
/* Driver's quirk flags */

/*
 * This workaround buffers data in the audio fabric SDRAM, from where
 * the DMA transfers will operate. Should be enabled only for SPI
 * slave mode.
 */
#define QUIRKS_SRAM_ADDITIONAL_CPY	1
/*
 * If set the trailing bytes won't be handled by the DMA.
 * Trailing byte feature not fully available.
 */
#define QUIRKS_DMA_USE_NO_TRAIL		2
/*
 * If set, the driver will use PM_QOS to reduce the latency introduced
 * by the deeper C-states which may produce over/under run issues. Must
 * be used in slave mode. In master mode, the latency is not critical,
 * but setting this workaround may improve the SPI throughput.
 */
#define QUIRKS_USE_PM_QOS		4
/* This quirk is set on Moorestown */
#define QUIRKS_PLATFORM_MRST		8
/* This quirk is set on Medfield */
#define QUIRKS_PLATFORM_MDFL		16
/*
 * If set, the driver will apply the bitbanging workaround needed to
 * enable defective Langwell stepping A SSP. The defective SSP can be
 * enabled only once, and should never be disabled.
 */
#define QUIRKS_BIT_BANGING		32
/* If set, SPI is in slave clock mode */
#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
> +
> +/* Uncomment to get RX and TX short dumps after each transfer */
> +/* #define DUMP_RX 1 */
> +#define MAX_TRAILING_BYTE_RETRY 16
> +#define MAX_TRAILING_BYTE_LOOP 100
> +#define DELAY_TO_GET_A_WORD 3
> +#define DFLT_TIMEOUT_VAL 500
> +
/*
 * Generate read_<reg>()/write_<reg>() accessors for a 32-bit register
 * at byte offset @off from the mapped base pointer @p.
 * NOTE(review): @p should arguably be void __iomem * so sparse can
 * check the address space -- confirm.
 */
#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
> +
> +#define RX_DIRECTION 0
> +#define TX_DIRECTION 1
> +
> +#define I2C_ACCESS_USDELAY 10
> +
> +#define DFLT_BITS_PER_WORD 16
> +#define MIN_BITS_PER_WORD     4
> +#define MAX_BITS_PER_WORD     32
> +#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
> +
> +#define TRUNCATE(x, a) ((x) & ~((a)-1))
> +
> +DEFINE_SSP_REG(SSCR0, 0x00)
> +DEFINE_SSP_REG(SSCR1, 0x04)
> +DEFINE_SSP_REG(SSSR, 0x08)
> +DEFINE_SSP_REG(SSITR, 0x0c)
> +DEFINE_SSP_REG(SSDR, 0x10)
> +DEFINE_SSP_REG(SSTO, 0x28)
> +DEFINE_SSP_REG(SSPSP, 0x2c)
> +
> +DEFINE_SSP_REG(I2CCTRL, 0x00);
> +DEFINE_SSP_REG(I2CDATA, 0x04);
> +
> +DEFINE_SSP_REG(GPLR1, 0x04);
> +DEFINE_SSP_REG(GPDR1, 0x0c);
> +DEFINE_SSP_REG(GPSR1, 0x14);
> +DEFINE_SSP_REG(GPCR1, 0x1C);
> +DEFINE_SSP_REG(GAFR1_U, 0x44);
> +
> +#define SYSCFG  0x20bc0
> +
> +#define SRAM_BASE_ADDR 0xfffdc000
> +#define SRAM_RX_ADDR   SRAM_BASE_ADDR
> +#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
> +
> +#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
> +#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
> +#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
> +#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
> +#define SSCR0_ECS   (1 << 6) /* External clock select */
> +#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
> +
> +#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
> +#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
> +#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
> +#define SSCR0_NCS   (1 << 21)           /* Network clock select */
> +#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrrun int mask */
> +#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
> +#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
> +#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
> +#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
> +#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
> +
> +#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
> +#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
> +#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
> +#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
> +#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
> +#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
> +#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
> +#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
> +#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
> +#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
> +
> +#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
> +#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
> +#define SSSR_BSY		(1 << 4)	/* SSP Busy */
> +#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
> +#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
> +#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
> +#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
> +
> +#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
> +#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
> +#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
> +#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
> +
> +#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
> +#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
> +#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
> +#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
/*
 * Bulverde SSP register bit definitions (SPI mode).
 * SSCR1_*  : SSP Control Register 1 bits
 * SSSR_*   : SSP Status Register bits
 * SSPSP_*  : SSP Programmable Serial Protocol register fields
 */
#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interrupt Enable */
#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */

#define SSSR_BCE         (1 << 23) /* Bit Count Error */
#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
#define SSSR_EOC         (1 << 20) /* End Of Chain */
#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */

#define SSPSP_FSRT        (1 << 25)   /* Frame Sync Relative Timing */
#define SSPSP_DMYSTOP(x)  ((x) << 23) /* Dummy Stop */
#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
#define SSPSP_SFRMDLY(x)  ((x) << 9)  /* Serial Frame Delay */
#define SSPSP_DMYSTRT(x)  ((x) << 7)  /* Dummy Start */
#define SSPSP_STRTDLY(x)  ((x) << 4)  /* Start Delay */
#define SSPSP_ETDS        (1 << 3)    /* End of Transfer data State */
#define SSPSP_SFRMP       (1 << 2)    /* Serial Frame Polarity */
#define SSPSP_SCMODE(x)   ((x) << 0)  /* Serial Bit Rate Clock Mode */

/*
 * For testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables.
 * (SSCR1_TTELP, SSCR1_TTE, SSCR1_RFT, SSCR1_TFT, SSCR1_MWDS,
 * SSCR1_SPH, SSCR1_SPO and SSCR1_LBM are defined earlier in this header.)
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
> +
/*
 * Per-direction context passed to the DMA completion callbacks:
 * ties a completion back to its owning driver context and direction.
 */
struct callback_param {
	void *drv_context;	/* owning struct ssp_driver_context */
	u32 direction;		/* DMA direction of the completed transfer (TX vs RX) */
};
> +
/*
 * Run-time state for one SSP-based SPI master controller instance.
 *
 * NOTE(review): the code that manipulates these fields is not visible in
 * this chunk; per-field comments below are inferred from names/types and
 * should be confirmed against the driver implementation.
 */
struct ssp_driver_context {
	/* Driver model hookup */
	struct pci_dev *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* SSP register addresses */
	unsigned long paddr;		/* physical base of the SSP register block */
	void *ioaddr;			/* mapped SSP registers; NOTE(review): kernel
					 * convention would be void __iomem * -- confirm */
	int irq;			/* SSP interrupt line */

	/* I2C registers */
	dma_addr_t I2C_paddr;		/* physical base of the I2C register block */
	void *I2C_ioaddr;		/* mapped I2C registers (same __iomem note) */

	/* SSP masks */
	u32 cr1_sig;			/* SSCR1 value/mask -- usage not visible here */
	u32 cr1;			/* cached SSCR1 value -- TODO confirm */
	u32 clear_sr;			/* SSSR bits to clear -- TODO confirm */
	u32 mask_sr;			/* SSSR bits of interest -- TODO confirm */

	/* PM_QOS request */
	struct pm_qos_request_list pm_qos_req;

	/* Tasklet used for polled (non-interrupt) transfer servicing */
	struct tasklet_struct poll_transfer;

	spinlock_t lock;		/* presumably protects the transfer state below */

	/* Current message transfer state info */
	struct spi_message *cur_msg;	/* message currently in flight */
	size_t len;			/* total transfer length in bytes */
	size_t len_dma_rx;		/* RX length handled via DMA */
	size_t len_dma_tx;		/* TX length handled via DMA */
	void *tx;			/* TX buffer cursor -- TODO confirm */
	void *tx_end;			/* end of the TX buffer */
	void *rx;			/* RX buffer cursor -- TODO confirm */
	void *rx_end;			/* end of the RX buffer */
	bool dma_initialized;		/* DMA channels have been set up */
	int dma_mapped;			/* nonzero if buffers are already DMA-mapped */
	dma_addr_t rx_dma;		/* DMA address of the RX buffer */
	dma_addr_t tx_dma;		/* DMA address of the TX buffer */
	u8 n_bytes;			/* bytes per FIFO word for the current transfer */
	int (*write)(struct ssp_driver_context *drv_context);	/* PIO TX op */
	int (*read)(struct ssp_driver_context *drv_context);	/* PIO RX op */

	struct intel_mid_dma_slave    dmas_tx;	/* TX DMA slave configuration */
	struct intel_mid_dma_slave    dmas_rx;	/* RX DMA slave configuration */
	struct dma_chan    *txchan;		/* TX DMA channel */
	struct dma_chan    *rxchan;		/* RX DMA channel */
	struct workqueue_struct *dma_wq;	/* workqueue for deferred completion */
	struct work_struct complete_work;	/* completion work item */

	u8 __iomem *virt_addr_sram_tx;	/* mapped SRAM region for TX */
	u8 __iomem *virt_addr_sram_rx;	/* mapped SRAM region for RX */

	int txdma_done;			/* TX DMA completion flag */
	int rxdma_done;			/* RX DMA completion flag */
	struct callback_param tx_param;	/* context for the TX DMA callback */
	struct callback_param rx_param;	/* context for the RX DMA callback */
	struct pci_dev *dmac1;		/* DMA controller PCI device */

	unsigned long quirks;		/* platform/silicon quirk flags */
	u32 rx_fifo_threshold;		/* RX FIFO service-request threshold */
};
> +
/*
 * Per-SPI-device ("chip") state cached by the driver so individual
 * transfers need not recompute controller settings.
 * NOTE(review): semantics inferred from field names; the setup code is
 * outside this chunk -- confirm there.
 */
struct chip_data {
	u32 cr0;		/* precomputed SSCR0 value -- TODO confirm */
	u32 cr1;		/* precomputed SSCR1 value -- TODO confirm */
	u32 timeout;		/* transfer timeout */
	u8 n_bytes;		/* bytes per FIFO word */
	u8 dma_enabled;		/* nonzero if DMA is allowed for this device */
	u8 bits_per_word;	/* word size of the SPI device */
	u32 speed_hz;		/* requested clock rate */
	int (*write)(struct ssp_driver_context *drv_context);	/* PIO TX op for this chip */
	int (*read)(struct ssp_driver_context *drv_context);	/* PIO RX op for this chip */
};
> +
> +
/* FIFO burst sizes selectable via intel_mid_ssp_spi_chip.burst_size */
enum intel_mid_ssp_spi_fifo_burst {
	IMSS_FIFO_BURST_1,
	IMSS_FIFO_BURST_4,
	IMSS_FIFO_BURST_8
};
> +
/* spi_board_info.controller_data for SPI slave devices,
 * copied to spi_device.platform_data ... mostly for dma tuning
 */
struct intel_mid_ssp_spi_chip {
	enum intel_mid_ssp_spi_fifo_burst burst_size;	/* FIFO burst size */
	u32 timeout;			/* transfer timeout */
	u8 enable_loopback;		/* nonzero to enable loopback (presumably
					 * SSCR1 LBM, for testing) -- confirm */
	u8 dma_enabled;			/* nonzero to allow DMA for this device */
};
> +
> +
> +#define SPI_DIB_NAME_LEN  16
> +#define SPI_DIB_SPEC_INFO_LEN      10
> +
> +struct spi_dib_header {
> +	u32       signature;
> +	u32       length;
> +	u8         rev;
> +	u8         checksum;
> +	u8         dib[0];
> +} __attribute__((packed));
> +
> +#endif /*INTEL_MID_SSP_SPI_H_*/

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2011-02-14 19:09 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <[PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011>
2011-02-02 21:01 ` [PATCH 0/1] Adding intel_mid_ssp_spi driver : 01/27/2011 Russ Gorby
2011-02-12  9:19   ` Grant Likely
2011-02-14 17:22     ` Gorby, Russ
2011-02-02 21:01 ` Russ Gorby
2011-02-02 21:01 ` [PATCH 1/1] spi: intel_mid_ssp_spi: new SPI driver for intel Medfield platform Russ Gorby
2011-02-02 21:01 ` Russ Gorby
2011-02-02 21:03   ` Mark Brown
2011-02-02 22:26   ` Alan Cox
2011-02-02 22:40   ` Alan Cox
2011-02-03 13:28     ` Mark Brown
2011-02-03 15:04       ` Alan Cox
2011-02-03 15:06         ` Mark Brown
2011-02-14 19:09     ` Grant Likely

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.