From: Kishon Vijay Abraham I <kishon@ti.com>
To: Bjorn Helgaas <bhelgaas@google.com>,
	Rob Herring <robh+dt@kernel.org>,
	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>,
	Robin Murphy <robin.murphy@arm.com>,
	Tom Joseph <tjoseph@cadence.com>
Cc: <linux-pci@vger.kernel.org>, <devicetree@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>
Subject: Re: [PATCH v3 4/4] PCI: cadence: Use "dma-ranges" instead of "cdns,no-bar-match-nbits" property
Date: Mon, 18 May 2020 16:42:07 +0530
Message-ID: <b30c9882-b8f5-44e3-bfcd-7faefddbc4af@ti.com>
In-Reply-To: <20200508130646.23939-5-kishon@ti.com>

Hi RobH, Robin,

On 5/8/2020 6:36 PM, Kishon Vijay Abraham I wrote:
> The Cadence PCIe core driver (host mode) uses the
> "cdns,no-bar-match-nbits" property to configure the number of bits
> passed through from the PCIe address to the internal address in the
> Inbound Address Translation register. This configuration only uses
> the NO MATCH BAR.
> 
> However, the standard PCI dt-binding already defines "dma-ranges" to
> describe the address ranges accessible by the PCIe controller. Add
> support in the Cadence PCIe host driver to parse "dma-ranges" and
> configure the inbound regions for BAR0, BAR1 and the NO MATCH BAR.
> The Cadence IP specifies a maximum size of 256 GB for BAR0 and 2 GB
> for BAR1, so if "dma-ranges" specifies a size larger than the maximum
> allowed, the driver splits the region and configures the BARs
> accordingly.
> 
> Legacy device tree binding compatibility is maintained by retaining
> support for "cdns,no-bar-match-nbits".
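
As an illustration, a host bridge node could describe an inbound window
with a single "dma-ranges" entry like the one below. This is only a
minimal sketch: the node name, addresses and sizes are made up and do
not correspond to any particular SoC, and it assumes the parent bus
uses two address cells and two size cells:

    pcie@fd000000 {
        ...
        /* 32-bit MEM space: map the low 4 GB of system RAM 1:1 */
        dma-ranges = <0x02000000 0x0 0x00000000
                      0x0 0x00000000
                      0x1 0x00000000>;
    };

With such an entry the driver would program an inbound region covering
4 GB starting at CPU address 0x0, which is roughly what the legacy
"cdns,no-bar-match-nbits = <32>" property expressed via the NO MATCH
BAR.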

Do you have any comments on this patch?

Tom,

Were you able to test this on a Cadence platform?

Thanks
Kishon

> 
> Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
> ---
>  .../controller/cadence/pcie-cadence-host.c    | 141 ++++++++++++++++--
>  drivers/pci/controller/cadence/pcie-cadence.h |  17 ++-
>  2 files changed, 141 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
> index 6ecebb79057a..2485ecd8434d 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence-host.c
> +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
> @@ -11,6 +11,12 @@
>  
>  #include "pcie-cadence.h"
>  
> +static u64 cdns_rp_bar_max_size[] = {
> +	[RP_BAR0] = _ULL(128 * SZ_2G),
> +	[RP_BAR1] = SZ_2G,
> +	[RP_NO_BAR] = SZ_64T,
> +};
> +
>  void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
>  			       int where)
>  {
> @@ -106,6 +112,117 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
>  	return 0;
>  }
>  
> +static void cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
> +					 enum cdns_pcie_rp_bar bar,
> +					 u64 cpu_addr, u32 aperture)
> +{
> +	struct cdns_pcie *pcie = &rc->pcie;
> +	u32 addr0, addr1;
> +
> +	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
> +		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
> +	addr1 = upper_32_bits(cpu_addr);
> +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
> +	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);
> +}
> +
> +static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
> +				     struct resource_entry *entry,
> +				     enum cdns_pcie_rp_bar *index)
> +{
> +	u64 cpu_addr, pci_addr, size, winsize;
> +	struct cdns_pcie *pcie = &rc->pcie;
> +	struct device *dev = pcie->dev;
> +	enum cdns_pcie_rp_bar bar;
> +	unsigned long flags;
> +	u32 aperture;
> +	u32 value;
> +
> +	cpu_addr = entry->res->start;
> +	flags = entry->res->flags;
> +	pci_addr = entry->res->start - entry->offset;
> +	size = resource_size(entry->res);
> +	bar = *index;
> +
> +	if (entry->offset) {
> +		dev_err(dev, "Cannot map PCI addr: %llx to CPU addr: %llx\n",
> +			pci_addr, cpu_addr);
> +		return -EINVAL;
> +	}
> +
> +	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
> +	while (size > 0) {
> +		if (bar > RP_NO_BAR) {
> +			dev_err(dev, "Failed to map inbound regions!\n");
> +			return -EINVAL;
> +		}
> +
> +		winsize = size;
> +		if (size > cdns_rp_bar_max_size[bar])
> +			winsize = cdns_rp_bar_max_size[bar];
> +
> +		aperture = ilog2(winsize);
> +
> +		cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, aperture);
> +
> +		if (bar == RP_NO_BAR)
> +			break;
> +
> +		if (winsize + cpu_addr >= SZ_4G) {
> +			if (!(flags & IORESOURCE_PREFETCH))
> +				value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
> +			value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
> +		} else {
> +			if (!(flags & IORESOURCE_PREFETCH))
> +				value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
> +			value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
> +		}
> +
> +		value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
> +
> +		size -= winsize;
> +		cpu_addr += winsize;
> +		bar++;
> +	}
> +	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
> +	*index = bar;
> +
> +	return 0;
> +}
> +
> +static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
> +{
> +	enum cdns_pcie_rp_bar bar = RP_BAR0;
> +	struct cdns_pcie *pcie = &rc->pcie;
> +	struct device *dev = pcie->dev;
> +	struct device_node *np = dev->of_node;
> +	struct pci_host_bridge *bridge;
> +	struct resource_entry *entry;
> +	u32 no_bar_nbits = 32;
> +	int err;
> +
> +	bridge = pci_host_bridge_from_priv(rc);
> +	if (!bridge)
> +		return -ENOMEM;
> +
> +	if (list_empty(&bridge->dma_ranges)) {
> +		of_property_read_u32(np, "cdns,no-bar-match-nbits",
> +				     &no_bar_nbits);
> +		cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, no_bar_nbits);
> +		return 0;
> +	}
> +
> +	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
> +		err = cdns_pcie_host_bar_config(rc, entry, &bar);
> +		if (err) {
> +			dev_err(dev, "Fail to configure IB using dma-ranges\n");
> +			return err;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
>  {
>  	struct cdns_pcie *pcie = &rc->pcie;
> @@ -160,16 +277,9 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
>  		r++;
>  	}
>  
> -	/*
> -	 * Set Root Port no BAR match Inbound Translation registers:
> -	 * needed for MSI and DMA.
> -	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
> -	 * inbound translation registers.
> -	 */
> -	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
> -	addr1 = 0;
> -	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
> -	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
> +	err = cdns_pcie_host_map_dma_ranges(rc);
> +	if (err)
> +		return err;
>  
>  	return 0;
>  }
> @@ -179,10 +289,16 @@ static int cdns_pcie_host_init(struct device *dev,
>  			       struct cdns_pcie_rc *rc)
>  {
>  	struct resource *bus_range = NULL;
> +	struct pci_host_bridge *bridge;
>  	int err;
>  
> +	bridge = pci_host_bridge_from_priv(rc);
> +	if (!bridge)
> +		return -ENOMEM;
> +
>  	/* Parse our PCI ranges and request their resources */
> -	err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
> +	err = pci_parse_request_of_pci_ranges(dev, resources,
> +					      &bridge->dma_ranges, &bus_range);
>  	if (err)
>  		return err;
>  
> @@ -239,9 +355,6 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
>  	pcie = &rc->pcie;
>  	pcie->is_rc = true;
>  
> -	rc->no_bar_nbits = 32;
> -	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
> -
>  	rc->vendor_id = 0xffff;
>  	of_property_read_u32(np, "vendor-id", &rc->vendor_id);
>  
> diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
> index f349f5828a58..336237a7025c 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence.h
> +++ b/drivers/pci/controller/cadence/pcie-cadence.h
> @@ -92,6 +92,20 @@
>  #define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
>  #define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
>  
> +#define LM_RC_BAR_CFG_CTRL_DISABLED(bar)		\
> +		(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar)		\
> +		(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar)		\
> +		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar)	\
> +	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar)		\
> +		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar)	\
> +	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
> +#define LM_RC_BAR_CFG_APERTURE(bar, aperture)		\
> +					(((aperture) - 2) << ((bar) * 8))
>  
>  /*
>   * Endpoint Function Registers (PCI configuration space for endpoint functions)
> @@ -266,8 +280,6 @@ struct cdns_pcie {
>   * @bus_range: first/last buses behind the PCIe host controller
>   * @cfg_base: IO mapped window to access the PCI configuration space of a
>   *            single function at a time
> - * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
> - *                translation (nbits sets into the "no BAR match" register)
>   * @vendor_id: PCI vendor ID
>   * @device_id: PCI device ID
>   */
> @@ -276,7 +288,6 @@ struct cdns_pcie_rc {
>  	struct resource		*cfg_res;
>  	struct resource		*bus_range;
>  	void __iomem		*cfg_base;
> -	u32			no_bar_nbits;
>  	u32			vendor_id;
>  	u32			device_id;
>  };
> 

