From: Lucas Tanure <tanureal@opensource.cirrus.com>
To: Mark Brown <broonie@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	"Rafael J . Wysocki" <rafael@kernel.org>,
	Sanjay R Mehta <sanju.mehta@amd.com>,
	Nehal Bakulchandra Shah <Nehal-Bakulchandra.shah@amd.com>
Cc: <linux-kernel@vger.kernel.org>, <linux-spi@vger.kernel.org>,
	<patches@opensource.cirrus.com>,
	Lucas Tanure <tanureal@opensource.cirrus.com>
Subject: [PATCH 08/10] spi: amd: Fill FIFO buffer with the whole message
Date: Wed, 8 Sep 2021 12:34:49 +0100	[thread overview]
Message-ID: <20210908113450.788452-9-tanureal@opensource.cirrus.com> (raw)
In-Reply-To: <20210908113450.788452-1-tanureal@opensource.cirrus.com>

The controller is half-duplex: it cannot read data while it is sending
data. The FIFO, however, behaves as full-duplex: writes and reads must be
queued and executed together, and the read data is offset in the FIFO by
the length of the initial write data (as it would be on a full-duplex SPI
bus).

The controller also has an automatic chip select that is only asserted
while the FIFO contents are being transmitted, which can make read/write
data meaningless: CS would be toggled right after the required read/write
address is sent.

To avoid that, set the maximum transfer and message size to
AMD_SPI_FIFO_SIZE, ensuring that incoming messages always fit inside a
single FIFO buffer.

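As a minimal sketch (not part of this patch) of how the FIFO offset works,
a single register read maps onto the FIFO roughly as below, reusing the
driver accessors that appear in the diff; the helper name is hypothetical
and error paths are simplified:

static int example_read_one_reg(struct amd_spi *amd_spi, u8 opcode, u8 addr, u8 *val)
{
	u8 fifo_pos = AMD_SPI_FIFO_BASE;
	int ret;

	amd_spi_set_opcode(amd_spi, opcode);
	amd_spi_writereg8(amd_spi, fifo_pos++, addr);	/* one TX byte after the opcode */
	amd_spi_set_tx_count(amd_spi, 1);
	amd_spi_set_rx_count(amd_spi, 1);

	ret = amd_spi_execute_opcode(amd_spi);		/* CS is asserted only around this run */
	if (ret)
		return ret;
	ret = amd_spi_busy_wait(amd_spi);
	if (ret)
		return ret;

	*val = amd_spi_readreg8(amd_spi, fifo_pos);	/* RX data lands after the TX byte */
	return 0;
}
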
Signed-off-by: Lucas Tanure <tanureal@opensource.cirrus.com>
---
 drivers/spi/spi-amd.c | 193 +++++++++++++++++++++++++++---------------
 1 file changed, 125 insertions(+), 68 deletions(-)

diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 99b2b0ccff08..0face11740ea 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -4,7 +4,8 @@
 //
 // Copyright (c) 2020, Advanced Micro Devices, Inc.
 //
-// Author: Sanjay R Mehta <sanju.mehta@amd.com>
+// Authors: Sanjay R Mehta <sanju.mehta@amd.com>
+//          Lucas Tanure <tanureal@opensource.cirrus.com>
 
 #include <linux/acpi.h>
 #include <linux/init.h>
@@ -28,6 +29,7 @@
 #define AMD_SPI_RX_COUNT_REG	0x4B
 #define AMD_SPI_STATUS_REG	0x4C
 
+#define AMD_SPI_FIFO_SIZE	70
 #define AMD_SPI_MEM_SIZE	200
 
 /* M_CMD OP codes for SPI */
@@ -38,6 +40,13 @@ struct amd_spi {
 	void __iomem *io_remap_addr;
 	unsigned long io_base_addr;
 	u32 rom_addr;
+	struct list_head rbuf_head;
+};
+
+struct amd_spi_read_buffer {
+	struct list_head node;
+	u8 *buf;
+	u8 len;
 };
 
 static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
@@ -138,83 +147,127 @@ static int amd_spi_master_setup(struct spi_device *spi)
 	return 0;
 }
 
-static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
-				    struct spi_master *master,
-				    struct spi_message *message)
+static void amd_spi_clear_list(struct amd_spi *amd_spi)
 {
-	struct spi_transfer *xfer = NULL;
-	u8 cmd_opcode;
-	u8 *buf = NULL;
-	u32 m_cmd = 0;
-	u32 i = 0;
-	u32 tx_len = 0, rx_len = 0;
-
-	list_for_each_entry(xfer, &message->transfers,
-			    transfer_list) {
-		if (xfer->rx_buf)
-			m_cmd = AMD_SPI_XFER_RX;
-		if (xfer->tx_buf)
-			m_cmd = AMD_SPI_XFER_TX;
-
-		if (m_cmd & AMD_SPI_XFER_TX) {
-			buf = (u8 *)xfer->tx_buf;
-			tx_len = xfer->len - 1;
-			cmd_opcode = *(u8 *)xfer->tx_buf;
-			buf++;
-			amd_spi_set_opcode(amd_spi, cmd_opcode);
-
-			/* Write data into the FIFO. */
-			for (i = 0; i < tx_len; i++) {
-				iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr +
-					 AMD_SPI_FIFO_BASE + i));
-			}
+	struct amd_spi_read_buffer *rbuf, *tmp;
 
-			amd_spi_set_tx_count(amd_spi, tx_len);
-			amd_spi_clear_fifo_ptr(amd_spi);
-			/* Execute command */
-			amd_spi_execute_opcode(amd_spi);
-		}
-		if (m_cmd & AMD_SPI_XFER_RX) {
-			/*
-			 * Store no. of bytes to be received from
-			 * FIFO
-			 */
-			rx_len = xfer->len;
-			buf = (u8 *)xfer->rx_buf;
-			amd_spi_set_rx_count(amd_spi, rx_len);
-			amd_spi_clear_fifo_ptr(amd_spi);
-			/* Execute command */
-			amd_spi_execute_opcode(amd_spi);
-			/* Read data from FIFO to receive buffer  */
-			for (i = 0; i < rx_len; i++)
-				buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i);
-		}
+	list_for_each_entry_safe(rbuf, tmp, &amd_spi->rbuf_head, node) {
+		list_del(&rbuf->node);
+		kfree(rbuf);
 	}
+}
 
-	/* Update statistics */
-	message->actual_length = tx_len + rx_len + 1;
-	/* complete the transaction */
-	message->status = 0;
-	spi_finalize_current_message(master);
+static int amd_spi_transfer(struct amd_spi *amd_spi, u8 opcode, u8 tx_len, u8 rx_len, u8 fifo_pos)
+{
+	struct amd_spi_read_buffer *rbuf;
+	struct list_head *p;
+	int ret, i;
+
+	amd_spi_set_opcode(amd_spi, opcode);
+	amd_spi_set_tx_count(amd_spi, tx_len);
+	amd_spi_set_rx_count(amd_spi, rx_len);
+
+	ret = amd_spi_execute_opcode(amd_spi);
+	if (ret)
+		return ret;
+
+	if (!list_empty(&amd_spi->rbuf_head)) {
+		ret = amd_spi_busy_wait(amd_spi);
+		if (ret)
+			return ret;
+		list_for_each(p, &amd_spi->rbuf_head) {
+			rbuf = list_entry(p, struct amd_spi_read_buffer, node);
+			for (i = 0; i < rbuf->len; i++)
+				rbuf->buf[i] = amd_spi_readreg8(amd_spi, fifo_pos++);
+		}
+		amd_spi_clear_list(amd_spi);
+	}
 
 	return 0;
 }
 
-static int amd_spi_master_transfer(struct spi_master *master,
-				   struct spi_message *msg)
+/* amd_spi_transfer_one_message expects a spi_message no larger than AMD_SPI_FIFO_SIZE and with no
+ * TX transfer after an RX one within the same chip select.
+ * CS cannot be held between two amd_spi_execute_opcode runs, so fill the FIFO with all transfers
+ * until the first RX transfer.
+ */
+static int amd_spi_transfer_one_message(struct spi_controller *ctrl, struct spi_message *msg)
 {
-	struct amd_spi *amd_spi = spi_master_get_devdata(master);
-	struct spi_device *spi = msg->spi;
+	struct amd_spi *amd_spi = spi_master_get_devdata(ctrl);
+	u8 tx_len = 0, rx_len = 0, opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
+	struct amd_spi_read_buffer *rbuf;
+	struct spi_transfer *xfer;
+	u8 *tx_buf;
+	int ret, i;
+
+	amd_spi_select_chip(amd_spi, msg->spi->chip_select);
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (xfer->tx_buf) {
+			tx_buf = (u8 *)xfer->tx_buf;
+			if (!tx_len) {
+				opcode = tx_buf[0];
+				xfer->len--;
+				tx_buf++;
+			}
+			tx_len += xfer->len;
+			for (i = 0; i < xfer->len; i++)
+				amd_spi_writereg8(amd_spi, fifo_pos++, tx_buf[i]);
+		}
 
-	amd_spi_select_chip(amd_spi, spi->chip_select);
+		if (xfer->rx_buf) {
+			rx_len += xfer->len;
+			rbuf = kmalloc(sizeof(*rbuf), GFP_KERNEL);
+			if (!rbuf) {
+				ret = -ENOMEM;
+				goto complete;
+			}
 
-	/*
-	 * Extract spi_transfers from the spi message and
-	 * program the controller.
-	 */
-	amd_spi_fifo_xfer(amd_spi, master, msg);
+			rbuf->buf = (u8 *)xfer->rx_buf;
+			rbuf->len = xfer->len;
+			list_add(&rbuf->node, &amd_spi->rbuf_head);
+		}
 
-	return 0;
+		if (xfer->cs_change) {
+			ret = amd_spi_transfer(amd_spi, opcode, tx_len, rx_len, fifo_pos);
+			if (ret)
+				goto complete;
+
+			msg->actual_length += rx_len;
+			if (tx_len)
+				msg->actual_length += tx_len + 1;
+
+			fifo_pos = AMD_SPI_FIFO_BASE;
+			opcode = 0;
+			tx_len = 0;
+			rx_len = 0;
+		}
+	}
+
+	if (tx_len || rx_len) {
+		ret = amd_spi_transfer(amd_spi, opcode, tx_len, rx_len, fifo_pos);
+		if (ret)
+			goto complete;
+
+		msg->actual_length += rx_len;
+		if (tx_len)
+			msg->actual_length += tx_len + 1;
+	}
+	ret = 0;
+
+complete:
+	if (!list_empty(&amd_spi->rbuf_head))
+		amd_spi_clear_list(amd_spi);
+	/* complete the transaction */
+	msg->status = ret;
+	spi_finalize_current_message(ctrl);
+
+	return ret;
+}
+
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+	return AMD_SPI_FIFO_SIZE;
 }
 
 static int amd_spi_probe(struct platform_device *pdev)
@@ -244,9 +297,13 @@ static int amd_spi_probe(struct platform_device *pdev)
 	master->bus_num = 0;
 	master->num_chipselect = 4;
 	master->mode_bits = 0;
-	master->flags = SPI_MASTER_HALF_DUPLEX;
+	master->flags = SPI_CONTROLLER_HALF_DUPLEX | SPI_CONTROLLER_NO_TX_RX_CS;
 	master->setup = amd_spi_master_setup;
-	master->transfer_one_message = amd_spi_master_transfer;
+	master->max_transfer_size = amd_spi_max_transfer_size;
+	master->max_message_size = amd_spi_max_transfer_size;
+	master->transfer_one_message = amd_spi_transfer_one_message;
+
+	INIT_LIST_HEAD(&amd_spi->rbuf_head);
 
 	/* Register the controller with SPI framework */
 	err = devm_spi_register_master(dev, master);
-- 
2.33.0



Thread overview: 18+ messages
2021-09-08 11:34 [PATCH v2 00/10] Improve support for AMD SPI controllers Lucas Tanure
2021-09-08 11:34 ` [PATCH 01/10] regmap: spi: Set regmap max raw r/w from max_transfer_size Lucas Tanure
2021-09-08 11:34 ` [PATCH 02/10] regmap: spi: Check raw_[read|write] against max message size Lucas Tanure
2021-09-08 13:09   ` Charles Keepax
2021-09-08 13:17     ` Charles Keepax
2021-09-08 11:34 ` [PATCH 03/10] spi: Add flag for no TX after a RX in the same Chip Select Lucas Tanure
2021-09-08 12:37   ` Mark Brown
2021-09-09 10:51     ` Lucas tanure
2021-09-10 14:44       ` Mark Brown
2021-09-08 11:34 ` [PATCH 04/10] spi: amd: Refactor code to use less spi_master_get_devdata Lucas Tanure
2021-09-08 11:34 ` [PATCH 05/10] spi: amd: Refactor amd_spi_busy_wait Lucas Tanure
2021-09-08 11:34 ` [PATCH 06/10] spi: amd: Remove unneeded variable Lucas Tanure
2021-09-08 11:34 ` [PATCH 07/10] spi: amd: Check for idle bus before execute opcode Lucas Tanure
2021-09-08 11:34 ` Lucas Tanure [this message]
2021-09-08 13:22   ` [PATCH 08/10] spi: amd: Fill FIFO buffer with the whole message Charles Keepax
2021-09-08 11:34 ` [PATCH 09/10] spi: amd: Add support for latest platform Lucas Tanure
2021-09-12 21:53   ` Gabriel Krisman Bertazi
2021-09-08 12:28 ` [PATCH v2 00/10] Improve support for AMD SPI controllers Mark Brown
