From: Jarkko Sakkinen <jarkko@kernel.org>
To: Krishna Yarlagadda <kyarlagadda@nvidia.com>
Cc: robh+dt@kernel.org, broonie@kernel.org, peterhuewe@gmx.de,
	jgg@ziepe.ca, krzysztof.kozlowski+dt@linaro.org,
	linux-spi@vger.kernel.org, linux-tegra@vger.kernel.org,
	linux-integrity@vger.kernel.org, linux-kernel@vger.kernel.org,
	thierry.reding@gmail.com, jonathanh@nvidia.com,
	skomatineni@nvidia.com, ldewangan@nvidia.com
Subject: Re: [Patch V9 2/3] tpm_tis-spi: Add hardware wait polling
Date: Thu, 30 Mar 2023 01:41:07 +0300
Message-ID: <20230329224107.5y3anydp4fnmnehe@kernel.org>
In-Reply-To: <20230325183409.7695-3-kyarlagadda@nvidia.com>

On Sun, Mar 26, 2023 at 12:04:08AM +0530, Krishna Yarlagadda wrote:
> TPM devices may insert a wait state on the last clock cycle of the ADDR
> phase. For SPI controllers that support full-duplex transfers, this can
> be detected in software by reading the MISO line. For SPI controllers
> that only support half-duplex transfers, such as the Tegra QSPI, it is
> not possible to detect the wait signal in software. The QSPI controller
> in Tegra234 and Tegra241 implements hardware detection of the wait
> signal, which can be enabled in the controller for TPM devices.
> 
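For readers following along, the software detection referenced here is
roughly the following, condensed from the existing
tpm_tis_spi_flow_control() with error paths trimmed (variables as in
that function). After the 4-byte header, the TPM holds MISO low on the
last ADDR byte to signal a wait state, and the host clocks single dummy
bytes until the TPM reports ready:

	if ((phy->iobuf[3] & 0x01) == 0) {
		/* TPM inserted a wait state; clock dummies until ready */
		phy->iobuf[0] = 0;
		for (i = 0; i < TPM_RETRY; i++) {
			spi_xfer->len = 1;
			spi_message_init(&m);
			spi_message_add_tail(spi_xfer, &m);
			ret = spi_sync_locked(phy->spi_device, &m);
			if (ret < 0)
				return ret;
			if (phy->iobuf[0] & 0x01)	/* TPM ready */
				break;
		}
		if (i == TPM_RETRY)
			return -ETIMEDOUT;
	}

This is exactly what a half-duplex controller cannot do: it cannot read
MISO while it is still driving the ADDR bytes on MOSI.
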
> The current TPM TIS driver only supports software detection of the wait
> signal. To support SPI controllers that use hardware to detect the wait
> signal, add the function tpm_tis_spi_transfer_half() and move the
> existing code for software-based detection into a function called
> tpm_tis_spi_transfer_full(). SPI controllers that only support
> half-duplex transfers will always call tpm_tis_spi_transfer_half()
> because they cannot support software-based detection. The bit
> SPI_TPM_HW_FLOW is set to indicate to the SPI controller that hardware
> detection is required, and it is the responsibility of the SPI
> controller driver to determine whether this is supported.
> 
> For hardware flow control, the CMD, ADDR and DATA phases are combined
> into a single message, whereas for software flow control the existing
> method of sending CMD-ADDR in one message and DATA in another is kept.
> 
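In other words, the existing full-duplex path (renamed to
tpm_tis_spi_transfer_full() below) keeps the phases in separate
messages. A condensed sketch of that path, with the outer loop and
error handling trimmed:

	phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);	/* CMD */
	phy->iobuf[1] = 0xd4;		/* TIS address space */
	phy->iobuf[2] = addr >> 8;	/* ADDR high byte */
	phy->iobuf[3] = addr;		/* ADDR low byte */

	spi_xfer.tx_buf = phy->iobuf;
	spi_xfer.rx_buf = phy->iobuf;	/* full duplex: sample MISO too */
	spi_xfer.len = 4;
	spi_xfer.cs_change = 1;		/* keep CS asserted for DATA */

	spi_message_init(&m);
	spi_message_add_tail(&spi_xfer, &m);
	ret = spi_sync_locked(phy->spi_device, &m);
	if (!ret)
		ret = phy->flow_control(phy, &spi_xfer);  /* sw wait poll */
	/* ...a second transfer then carries the DATA phase... */
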
> Signed-off-by: Krishna Yarlagadda <kyarlagadda@nvidia.com>
> ---
>  drivers/char/tpm/tpm_tis_spi_main.c | 91 ++++++++++++++++++++++++++++-
>  1 file changed, 89 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
> index a0963a3e92bd..db9afd0b83da 100644
> --- a/drivers/char/tpm/tpm_tis_spi_main.c
> +++ b/drivers/char/tpm/tpm_tis_spi_main.c
> @@ -71,8 +71,74 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
>  	return 0;
>  }
>  
> -int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
> -			 u8 *in, const u8 *out)
> +/*
> + * Half-duplex controllers with support for TPM wait-state detection,
> + * such as the Tegra QSPI, need CMD, ADDR and DATA sent in a single
> + * message to manage HW flow control. Each phase is sent in a separate
> + * transfer so that the controller can identify the phase.
> + */
> +static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
> +				     u16 len, u8 *in, const u8 *out)
> +{
> +	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
> +	struct spi_transfer spi_xfer[3];
> +	struct spi_message m;
> +	u8 transfer_len;
> +	int ret = 0;
> +
> +	while (len) {
> +		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
> +
> +		spi_message_init(&m);
> +		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
> +		phy->iobuf[1] = 0xd4;
> +		phy->iobuf[2] = addr >> 8;
> +		phy->iobuf[3] = addr;
> +
> +		memset(&spi_xfer, 0, sizeof(spi_xfer));
> +
> +		spi_xfer[0].tx_buf = phy->iobuf;
> +		spi_xfer[0].len = 1;
> +		spi_message_add_tail(&spi_xfer[0], &m);
> +
> +		spi_xfer[1].tx_buf = phy->iobuf + 1;
> +		spi_xfer[1].len = 3;
> +		spi_message_add_tail(&spi_xfer[1], &m);
> +
> +		if (out) {
> +			spi_xfer[2].tx_buf = &phy->iobuf[4];
> +			spi_xfer[2].rx_buf = NULL;
> +			memcpy(&phy->iobuf[4], out, transfer_len);
> +			out += transfer_len;
> +		}
> +
> +		if (in) {
> +			spi_xfer[2].tx_buf = NULL;
> +			spi_xfer[2].rx_buf = &phy->iobuf[4];
> +		}
> +
> +		spi_xfer[2].len = transfer_len;
> +		spi_message_add_tail(&spi_xfer[2], &m);
> +
> +		reinit_completion(&phy->ready);
> +
> +		ret = spi_sync_locked(phy->spi_device, &m);
> +		if (ret < 0)
> +			return ret;
> +
> +		if (in) {
> +			memcpy(in, &phy->iobuf[4], transfer_len);
> +			in += transfer_len;
> +		}
> +
> +		len -= transfer_len;
> +	}
> +
> +	return ret;
> +}
> +
> +static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
> +				     u16 len, u8 *in, const u8 *out)
>  {
>  	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
>  	int ret = 0;
> @@ -140,6 +206,24 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
>  	return ret;
>  }
>  
> +int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
> +			 u8 *in, const u8 *out)
> +{
> +	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
> +	struct spi_controller *ctlr = phy->spi_device->controller;
> +
> +	/*
> +	 * TPM flow control over SPI requires full-duplex support.
> +	 * For a half-duplex controller, send the entire message in
> +	 * one go so that the controller can handle wait polling in
> +	 * hardware (the SPI_TPM_HW_FLOW flag is set at probe time).
> +	 */
> +	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
> +		return tpm_tis_spi_transfer_half(data, addr, len, in, out);
> +	else
> +		return tpm_tis_spi_transfer_full(data, addr, len, in, out);
> +}
> +
>  static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
>  				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
>  {
> @@ -181,6 +265,9 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
>  
>  	phy->flow_control = tpm_tis_spi_flow_control;
>  
> +	if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
> +		dev->mode |= SPI_TPM_HW_FLOW;
> +
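On the controller side (wired up for tegra210-quad in patch 3/3), the
flag lets the controller driver either arm its hardware wait-state
engine or refuse the transfer. A hypothetical sketch; the names here
are illustrative, not taken from the actual patch:

	if (spi->mode & SPI_TPM_HW_FLOW) {
		/* Reject rather than silently corrupt TPM traffic. */
		if (!controller_supports_tpm_wait_poll)
			return -EIO;
		/* Otherwise enable HW wait-state detection here. */
	}
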
>  	/* If the SPI device has an IRQ then use that */
>  	if (dev->irq > 0)
>  		irq = dev->irq;
> -- 
> 2.17.1
> 


Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>

BR, Jarkko

Thread overview: 10+ messages
2023-03-25 18:34 [Patch V9 0/3] Tegra TPM driver with HW flow control Krishna Yarlagadda
2023-03-25 18:34 ` [Patch V9 1/3] spi: Add TPM HW flow flag Krishna Yarlagadda
2023-03-29 22:40   ` Jarkko Sakkinen
2023-03-25 18:34 ` [Patch V9 2/3] tpm_tis-spi: Add hardware wait polling Krishna Yarlagadda
2023-03-29 22:41   ` Jarkko Sakkinen [this message]
2023-04-20  2:32   ` Jerry Snitselaar
2023-04-20  3:11     ` Jerry Snitselaar
2023-04-20 17:38       ` Krishna Yarlagadda
2023-03-25 18:34 ` [Patch V9 3/3] spi: tegra210-quad: Enable TPM " Krishna Yarlagadda
2023-03-29 22:18 ` [Patch V9 0/3] Tegra TPM driver with HW flow control Jarkko Sakkinen
