dmaengine.vger.kernel.org archive mirror
* [PATCH v6 0/2] Add Intel LGM soc DMA support
@ 2020-09-08 23:07 Amireddy Mallikarjuna reddy
       [not found] ` <748370a51af0ab768e542f1537d1aa3aeefebe8a.1599605765.git.mallikarjunax.reddy@linux.intel.com>
       [not found] ` <ff2de5b0e4cd414420d48377c7c97c45d71f6197.1599605765.git.mallikarjunax.reddy@linux.intel.com>
  0 siblings, 2 replies; 5+ messages in thread
From: Amireddy Mallikarjuna reddy @ 2020-09-08 23:07 UTC (permalink / raw)
  To: dmaengine, vkoul, devicetree, robh+dt
  Cc: linux-kernel, andriy.shevchenko, chuanhua.lei, cheol.yong.kim,
	qi-ming.wu, mallikarjunax.reddy, malliamireddy009,
	peter.ujfalusi

Add a DMA controller driver for the Lightning Mountain (LGM) family of SoCs.

The main function of the DMA controller is to transfer data between memory
and any DPlus-compliant peripheral, in either direction. A memory-to-memory
copy capability can also be configured.
This ldma driver configures the device and channels for the data and
control paths.

These controllers provide DMA capabilities for a variety of on-chip
devices such as SSC, HSNAND and GSWIP.

-------------
Future Plans:
-------------
The LGM SoC also includes a hardware memory copy engine, whose role is to
offload memory copy operations from the CPU.

Amireddy Mallikarjuna reddy (2):
  dt-bindings: dma: Add bindings for intel LGM SOC
  Add Intel LGM soc DMA support.

 .../devicetree/bindings/dma/intel,ldma.yaml        |  154 ++
 drivers/dma/Kconfig                                |    2 +
 drivers/dma/Makefile                               |    1 +
 drivers/dma/lgm/Kconfig                            |    9 +
 drivers/dma/lgm/Makefile                           |    2 +
 drivers/dma/lgm/lgm-dma.c                          | 1809 ++++++++++++++++++++
 include/linux/dma/lgm_dma.h                        |   27 +
 7 files changed, 2004 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/dma/intel,ldma.yaml
 create mode 100644 drivers/dma/lgm/Kconfig
 create mode 100644 drivers/dma/lgm/Makefile
 create mode 100644 drivers/dma/lgm/lgm-dma.c
 create mode 100644 include/linux/dma/lgm_dma.h

-- 
2.11.0



* Re: [PATCH v6 2/2] Add Intel LGM soc DMA support.
       [not found] ` <748370a51af0ab768e542f1537d1aa3aeefebe8a.1599605765.git.mallikarjunax.reddy@linux.intel.com>
@ 2020-09-09 11:14   ` Andy Shevchenko
       [not found]     ` <36a42016-3260-3933-bbf9-9203c4124115@linux.intel.com>
  0 siblings, 1 reply; 5+ messages in thread
From: Andy Shevchenko @ 2020-09-09 11:14 UTC (permalink / raw)
  To: Amireddy Mallikarjuna reddy
  Cc: dmaengine, vkoul, devicetree, robh+dt, linux-kernel,
	chuanhua.lei, cheol.yong.kim, qi-ming.wu, malliamireddy009,
	peter.ujfalusi

On Wed, Sep 09, 2020 at 07:07:34AM +0800, Amireddy Mallikarjuna reddy wrote:
> Add DMA controller driver for Lightning Mountain(LGM) family of SoCs.
> 
> The main function of the DMA controller is the transfer of data from/to any
> DPlus compliant peripheral to/from the memory. A memory to memory copy
> capability can also be configured.
> 
> This ldma driver is used to configure the device and channels for the data
> and control paths.

...

> +config INTEL_LDMA
> +	bool "Lightning Mountain centralized low speed DMA and high speed DMA controllers"
> +	select DMA_ENGINE
> +	select DMA_VIRTUAL_CHANNELS
> +	help
> +	  Enable support for intel Lightning Mountain SOC DMA controllers.
> +	  These controllers provide DMA capabilities for a variety of on-chip
> +	  devices such as SSC, HSNAND and GSWIP.

And how will the module be called?

...

> +struct ldma_dev;
> +struct ldma_port;

+ blank line

> +struct ldma_chan {
> +	struct ldma_port	*port; /* back pointer */
> +	char			name[8]; /* Channel name */

> +	struct virt_dma_chan	vchan;

You can make container_of() a no-op if you make this the first member of the
structure (see the sketch after the struct below).

> +	int			nr; /* Channel id in hardware */
> +	u32			flags; /* central way or channel based way */
> +	enum ldma_chan_on_off	onoff;
> +	dma_addr_t		desc_phys;
> +	void			*desc_base; /* Virtual address */
> +	u32			desc_cnt; /* Number of descriptors */
> +	int			rst;
> +	u32			hdrm_len;
> +	bool			hdrm_csum;
> +	u32			boff_len;
> +	u32			data_endian;
> +	u32			desc_endian;
> +	bool			pden;
> +	bool			desc_rx_np;
> +	bool			data_endian_en;
> +	bool			desc_endian_en;
> +	bool			abc_en;
> +	bool			desc_init;
> +	struct dma_pool		*desc_pool; /* Descriptors pool */
> +	u32			desc_num;
> +	struct dw2_desc_sw	*ds;
> +	struct work_struct	work;
> +	struct dma_slave_config config;
> +};
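
To illustrate the vchan-first suggestion above, a minimal sketch; the
to_ldma_chan() helper shown here is an assumption about how the patch maps
dma_chan back to ldma_chan, not copied from it:

	struct ldma_chan {
		struct virt_dma_chan	vchan;	 /* first, so container_of() is a no-op */
		struct ldma_port	*port;	 /* back pointer */
		char			name[8]; /* Channel name */
		/* ... remaining members unchanged ... */
	};

	static inline struct ldma_chan *to_ldma_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct ldma_chan, vchan.chan);
	}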

...

> +		struct {
> +			u32 len		:16;
> +			u32 res0	:7;
> +			u32 bofs	:2;
> +			u32 res1	:3;
> +			u32 eop		:1;
> +			u32 sop		:1;
> +			u32 c		:1;
> +			u32 own		:1;
> +		} __packed field;
> +		u32 word;

Can you rather use bitfield.h?

> +	} __packed status;
> +	u32 addr;
> +} __packed __aligned(8);
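
For illustration, roughly how the status word could be built with bitfield.h
instead of a packed C bitfield; the DESC_* mask names below are made up for
the sketch, not taken from the patch:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define DESC_LEN	GENMASK(15, 0)	/* was the len:16 bitfield */
	#define DESC_BOFS	GENMASK(24, 23)
	#define DESC_EOP	BIT(28)
	#define DESC_SOP	BIT(29)
	#define DESC_C		BIT(30)
	#define DESC_OWN	BIT(31)

	static u32 ldma_mk_status(u32 len)
	{
		/* Compose the hardware status word field by field */
		return FIELD_PREP(DESC_LEN, len) |
		       FIELD_PREP(DESC_SOP, 1) | FIELD_PREP(DESC_EOP, 1) |
		       FIELD_PREP(DESC_OWN, 1);
	}

FIELD_GET(DESC_LEN, status) then extracts the length again, without relying
on compiler-specific bitfield layout or __packed.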

...

> +struct dw2_desc_sw {
> +	struct ldma_chan	*chan;

> +	struct virt_dma_desc	vdesc;

Make it the first member and container_of() becomes a no-op.

> +	dma_addr_t		desc_phys;
> +	size_t			desc_cnt;
> +	size_t			size;
> +	struct dw2_desc		*desc_hw;
> +};

...

> +ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
> +{
> +	u32 old_val, new_val;
> +
> +	old_val = readl(d->base +  ofs);

> +	new_val = (old_val & ~mask) | (val & mask);

With bitfield.h you will have this as u32_replace_bits().

> +
> +	if (new_val != old_val)
> +		writel(new_val, d->base + ofs);
> +}
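
Note that u32_replace_bits() wants a compile-time-constant, contiguous mask
(which, as discussed further down the thread, is not always the case for this
helper), so it fits per-field setters rather than the generic
ldma_update_bits(); a sketch with hypothetical register and field names:

	#include <linux/bitfield.h>

	#define FOO_CTRL	0x10		/* hypothetical register offset */
	#define FOO_CTRL_BURST	GENMASK(7, 4)	/* hypothetical contiguous field */

	static void foo_set_burst(struct ldma_dev *d, u32 burst)
	{
		u32 old = readl(d->base + FOO_CTRL);
		u32 new = u32_replace_bits(old, burst, FOO_CTRL_BURST);

		if (new != old)
			writel(new, d->base + FOO_CTRL);
	}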

...

> +	/* Keep the class value unchanged */
> +	reg &= DMA_CCTRL_CLASS | DMA_CCTRL_CLASSH;
> +	reg |= val;

No mask? Consider u32_replace_bits() or other FIELD_*() macros.

...

> +static void ldma_chan_desc_hw_cfg(struct ldma_chan *c, dma_addr_t desc_base,
> +				  int desc_num)
> +{
> +	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&d->dev_lock, flags);
> +	ldma_update_bits(d, DMA_CS_MASK, c->nr, DMA_CS);
> +	writel(lower_32_bits(desc_base), d->base + DMA_CDBA);

> +	/* High 4 bits */

Why only 4?

> +	if (IS_ENABLED(CONFIG_64BIT)) {

> +		u32 hi = upper_32_bits(desc_base) & 0xF;

GENMASK() ?

> +
> +		ldma_update_bits(d, DMA_CDBA_MSB,
> +				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
> +	}
> +	writel(desc_num, d->base + DMA_CDLEN);
> +	spin_unlock_irqrestore(&d->dev_lock, flags);
> +
> +	c->desc_init = true;
> +}
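
Combining the two points above (per the follow-up reply, these are bits 35:32
of a 36-bit address), a sketch:

	if (IS_ENABLED(CONFIG_64BIT)) {
		/* Bits 35:32 of the 36-bit descriptor base address */
		u32 hi = upper_32_bits(desc_base) & GENMASK(3, 0);

		ldma_update_bits(d, DMA_CDBA_MSB,
				 FIELD_PREP(DMA_CDBA_MSB, hi), DMA_CCTRL);
	}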

...


> +	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n",
> +		readl(d->base + DMA_PCTRL));

This has a side effect. Better to use a temporary variable if you need to read
back.
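
i.e., something like this sketch, which does the MMIO read once and outside
the debug statement:

	u32 pctrl = readl(d->base + DMA_PCTRL);

	dev_dbg(d->dev, "Port Control 0x%08x configuration done\n", pctrl);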

...

> +static int ldma_chan_cfg(struct ldma_chan *c)
> +{
> +	struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);
> +	unsigned long flags;
> +	u32 reg;
> +
> +	reg = c->pden ? DMA_CCTRL_PDEN : 0;
> +	reg |= c->onoff ? DMA_CCTRL_ON : 0;
> +	reg |= c->rst ? DMA_CCTRL_RST : 0;
> +
> +	ldma_chan_cctrl_cfg(c, reg);
> +	ldma_chan_irq_init(c);

> +	if (d->ver > DMA_VER22) {

	if (d->ver <= DMA_VER22)
		return 0;

	?

> +		spin_lock_irqsave(&d->dev_lock, flags);
> +		ldma_chan_set_class(c, c->nr);
> +		ldma_chan_byte_offset_cfg(c, c->boff_len);
> +		ldma_chan_data_endian_cfg(c, c->data_endian_en, c->data_endian);
> +		ldma_chan_desc_endian_cfg(c, c->desc_endian_en, c->desc_endian);
> +		ldma_chan_hdr_mode_cfg(c, c->hdrm_len, c->hdrm_csum);
> +		ldma_chan_rxwr_np_cfg(c, c->desc_rx_np);
> +		ldma_chan_abc_cfg(c, c->abc_en);
> +		spin_unlock_irqrestore(&d->dev_lock, flags);
> +
> +		if (ldma_chan_is_hw_desc(c))
> +			ldma_chan_desc_hw_cfg(c, c->desc_phys, c->desc_cnt);
> +	}
> +
> +	return 0;
> +}

...

> +	/* DMA channel initialization */
> +	for (i = 0; i < d->chan_nrs; i++) {
> +		if (d->ver == DMA_VER22 && !(d->channels_mask & BIT(i)))
> +			continue;

for_each_set_bit() ?

> +		c = &d->chans[i];
> +		ldma_chan_cfg(c);
> +	}
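
A sketch of the for_each_set_bit() rework for the DMA_VER22 case (newer
versions would keep iterating over all channels); widening the mask into an
unsigned long for the iterator is an assumption:

	unsigned long mask = d->channels_mask;
	unsigned int i;

	if (d->ver == DMA_VER22) {
		/* Only the channels present in the mask exist on v2.2 */
		for_each_set_bit(i, &mask, d->chan_nrs)
			ldma_chan_cfg(&d->chans[i]);
	} else {
		for (i = 0; i < d->chan_nrs; i++)
			ldma_chan_cfg(&d->chans[i]);
	}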

...

> +		for (i = 0; i < d->port_nrs; i++) {
> +			p = &d->ports[i];
> +			p->rxendi = DMA_DFT_ENDIAN;
> +			p->txendi = DMA_DFT_ENDIAN;

> +			if (!fwnode_property_read_u32(fwnode, "intel,dma-burst",
> +						      &prop)) {

How is this not invariant across the loop? The same property is read on every
iteration.

> +				p->rxbl = prop;
> +				p->txbl = prop;
> +			} else {
> +				p->rxbl = DMA_DFT_BURST;
> +				p->txbl = DMA_DFT_BURST;
> +			}
> +
> +			p->pkt_drop = DMA_PKT_DROP_DIS;
> +		}
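
i.e., the property lookup could be hoisted out of the loop, roughly like this
sketch:

	u32 burst = DMA_DFT_BURST;

	/* Same firmware property for every port, so look it up only once */
	fwnode_property_read_u32(fwnode, "intel,dma-burst", &burst);

	for (i = 0; i < d->port_nrs; i++) {
		p = &d->ports[i];
		p->rxendi = DMA_DFT_ENDIAN;
		p->txendi = DMA_DFT_ENDIAN;
		p->rxbl = burst;
		p->txbl = burst;
		p->pkt_drop = DMA_PKT_DROP_DIS;
	}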

...

> +	if (d->ver == DMA_VER22) {
> +		spin_lock_irqsave(&c->vchan.lock, flags);
> +		if (vchan_issue_pending(&c->vchan)) {
> +			struct virt_dma_desc *vdesc;
> +
> +			/* Get the next descriptor */
> +			vdesc = vchan_next_desc(&c->vchan);
> +			if (!vdesc) {
> +				c->ds = NULL;

Nice! Aren't you forgetting to do something here?

> +				return;

> +			}
> +			list_del(&vdesc->node);
> +			c->ds = to_lgm_dma_desc(vdesc);
> +			ldma_chan_desc_hw_cfg(c, c->ds->desc_phys, c->ds->desc_cnt);
> +			ldma_chan_irq_en(c);
> +		}
> +		spin_unlock_irqrestore(&c->vchan.lock, flags);
> +	}
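
Presumably the hint above is that this early return leaves c->vchan.lock held;
a sketch of the obvious fix:

	vdesc = vchan_next_desc(&c->vchan);
	if (!vdesc) {
		c->ds = NULL;
		spin_unlock_irqrestore(&c->vchan.lock, flags);
		return;
	}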

...

> +	irncr = readl(d->base + DMA_IRNCR);
> +	if (!irncr) {

> +		dev_err(d->dev, "dummy interrupt\n");

I could imagine what happens in case of shared IRQ...

> +		return IRQ_NONE;
> +	}

...

> +	/* Default setting will be used */
> +	if (cfg->src_maxburst != 2 && cfg->src_maxburst != 4 &&
> +	    cfg->src_maxburst != 8)

This is strange. The caller should be able to set anything based on the
channel and device capabilities. This is a hidden problem for the caller. Are
you going to customize each peripheral driver for your DMA engine
implementation?

> +		return;

...

> +	if (!sgl)
> +		return NULL;

Is it possible?

...

> +static int
> +dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
> +{
> +	struct ldma_chan *c = to_ldma_chan(chan);
> +
> +	if ((cfg->direction == DMA_DEV_TO_MEM &&
> +	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
> +	    (cfg->direction == DMA_MEM_TO_DEV &&
> +	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||

Why?

> +	    !is_slave_direction(cfg->direction))
> +		return -EINVAL;
> +
> +	/* Must be the same */

Why?

> +	if (cfg->src_maxburst && cfg->dst_maxburst &&
> +	    cfg->src_maxburst != cfg->dst_maxburst)
> +		return -EINVAL;

> +	if (cfg->src_maxburst != 2 && cfg->src_maxburst != 4 &&
> +	    cfg->src_maxburst != 8)

Why?

> +		return -EINVAL;
> +
> +	memcpy(&c->config, cfg, sizeof(c->config));
> +
> +	return 0;
> +}

...

> +static void dma_work(struct work_struct *work)
> +{
> +	struct ldma_chan *c = container_of(work, struct ldma_chan, work);
> +	struct dma_async_tx_descriptor *tx = &c->ds->vdesc.tx;
> +	struct virt_dma_chan *vc = &c->vchan;
> +	struct dmaengine_desc_callback cb;
> +	struct virt_dma_desc *vd, *_vd;
> +	LIST_HEAD(head);
> +
> +	list_splice_tail_init(&vc->desc_completed, &head);

No protection?

> +	dmaengine_desc_get_callback(tx, &cb);
> +	dma_cookie_complete(tx);
> +	dmaengine_desc_callback_invoke(&cb, NULL);
> +
> +	list_for_each_entry_safe(vd, _vd, &head, node) {
> +		dmaengine_desc_get_callback(tx, &cb);
> +		dma_cookie_complete(tx);
> +		list_del(&vd->node);
> +		dmaengine_desc_callback_invoke(&cb, NULL);
> +
> +		vchan_vdesc_fini(vd);
> +	}
> +	c->ds = NULL;
> +}
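
For the splice, a sketch of taking the vchan lock (the exact locking scope is
an assumption, not from the patch):

	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	list_splice_tail_init(&vc->desc_completed, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* 'head' is now private to this work item and can be walked unlocked */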

...

> +static int
> +update_client_configs(struct of_dma *ofdma, struct of_phandle_args *spec)
> +{
> +	struct ldma_dev *d = ofdma->of_dma_data;
> +	struct ldma_port *p;
> +	struct ldma_chan *c;
> +	u32 chan_id =  spec->args[0];
> +	u32 port_id =  spec->args[1];
> +
> +	if (chan_id >= d->chan_nrs || port_id >= d->port_nrs)
> +		return 0;
> +
> +	p = &d->ports[port_id];
> +	c = &d->chans[chan_id];
> +	c->port = p;
> +
> +	if (d->ver == DMA_VER22) {
> +		u32 desc_num;
> +		u32 burst = spec->args[2];
> +
> +		if (burst != 2 && burst != 4 && burst != 8)
> +			return 0;
> +
> +		/* TX and RX has the same burst length */
> +		p->txbl = ilog2(burst);
> +		p->rxbl = p->txbl;
> +
> +		desc_num = spec->args[3];
> +		if (desc_num > 255)
> +			return 0;
> +		c->desc_num = desc_num;
> +
> +		ldma_port_cfg(p);
> +		ldma_chan_cfg(c);
> +	} else {
> +		if (spec->args[2] > 0 && spec->args[2] <= DMA_ENDIAN_TYPE3) {
> +			c->data_endian = spec->args[2];
> +			c->data_endian_en = true;
> +		}
> +
> +		if (spec->args[3] > 0 && spec->args[3] <= DMA_ENDIAN_TYPE3) {
> +			c->desc_endian = spec->args[3];
> +			c->desc_endian_en = true;
> +		}
> +
> +		if (spec->args[4] > 0 && spec->args[4] < 128)
> +			c->boff_len = spec->args[4];
> +
> +		if (spec->args[5])
> +			c->desc_rx_np = true;
> +
> +		/*
> +		 * If channel packet drop enabled, port packet drop should
> +		 * be enabled
> +		 */
> +		if (spec->args[6]) {
> +			c->pden = true;
> +			p->pkt_drop = DMA_PKT_DROP_EN;
> +		}
> +
> +		/*
> +		 * hdr-mode: If enabled, header mode size is ignored
> +		 *           If disabled, header mode size must be provided
> +		 */
> +		c->hdrm_csum = !!spec->args[8];
> +		if (!c->hdrm_csum) {
> +			if (!spec->args[7] || spec->args[7] > DMA_HDR_LEN_MAX)
> +				return 0;
> +			c->hdrm_len = spec->args[7];
> +		}
> +
> +		if (spec->args[10]) {
> +			c->desc_cnt = spec->args[10];
> +			if (c->desc_cnt > DMA_MAX_DESC_NUM) {
> +				dev_err(d->dev, "Channel %d descriptor number out of range %d\n",
> +					c->nr, c->desc_cnt);
> +				return 0;
> +			}
> +			c->desc_phys = spec->args[9];
> +			c->flags |= DMA_HW_DESC;
> +		}
> +
> +		ldma_port_cfg(p);
> +		ldma_chan_cfg(c);
> +	}
> +
> +	return 1;
> +}
> +

Can you split all functions of this kind into three each:

foo_v22()
foo_vXXX()

foo()
{
	if (ver == 22)
		return foo_v22();
	return foo_vXXX();
}

?
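
Applied to ldma_chan_cfg(), for example, it could look roughly like this
sketch (the exact split point is an assumption):

	static int ldma_chan_cfg_v22(struct ldma_chan *c)
	{
		/* cctrl + IRQ init only */
		return 0;
	}

	static int ldma_chan_cfg_v3x(struct ldma_chan *c)
	{
		/* cctrl + IRQ init, then class/endianness/header-mode/descriptor setup */
		return 0;
	}

	static int ldma_chan_cfg(struct ldma_chan *c)
	{
		struct ldma_dev *d = to_ldma_dev(c->vchan.chan.device);

		if (d->ver == DMA_VER22)
			return ldma_chan_cfg_v22(c);

		return ldma_chan_cfg_v3x(c);
	}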

...

> +	d->rst = devm_reset_control_get_optional(dev, NULL);
> +	if (IS_ERR(d->rst))
> +		return PTR_ERR(d->rst);
> +	reset_control_deassert(d->rst);

Shouldn't there be a devm_add_action_or_reset() to assert the reset on teardown?
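
e.g., a sketch (the callback name is hypothetical):

	static void ldma_assert_reset(void *data)
	{
		struct reset_control *rst = data;

		reset_control_assert(rst);
	}

	/* in probe(), right after reset_control_deassert(d->rst): */
	ret = devm_add_action_or_reset(dev, ldma_assert_reset, d->rst);
	if (ret)
		return ret;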

...

> +	if (IS_ENABLED(CONFIG_64BIT)) {
> +		if (id & DMA_ID_AW_36B)
> +			bitn = 36;
> +	}

if (a) { if (b) { ... }} ==> if (a && b) { ...}

...

> +	if (d->ver == DMA_VER22) {

Split?

> +		ret = of_property_read_u32(pdev->dev.of_node, "dma-channels",
> +					   &d->chan_nrs);
> +		if (ret < 0) {
> +			dev_err(dev, "unable to read dma-channels property\n");
> +			return ret;
> +		}
> +
> +		ret = of_property_read_u32(pdev->dev.of_node, "dma-channel-mask",
> +					   &d->channels_mask);
> +		if (ret < 0)
> +			d->channels_mask = GENMASK(d->chan_nrs - 1, 0);

Are the of_property_*() calls leftovers? Shouldn't these be device_property_*()
(sketch below)?

> +
> +		d->irq = platform_get_irq(pdev, 0);
> +		if (d->irq < 0)
> +			return d->irq;
> +
> +		ret = devm_request_irq(&pdev->dev, d->irq, dma_interrupt,
> +				       0, DRIVER_NAME, d);
> +		if (ret)
> +			return ret;
> +
> +		d->wq = alloc_ordered_workqueue("dma_wq", WQ_MEM_RECLAIM |
> +						WQ_HIGHPRI);
> +		if (!d->wq)
> +			return -ENOMEM;
> +	}
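
i.e., with the firmware-agnostic property API the same block would look
roughly like this sketch:

	ret = device_property_read_u32(dev, "dma-channels", &d->chan_nrs);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	ret = device_property_read_u32(dev, "dma-channel-mask", &d->channels_mask);
	if (ret < 0)
		d->channels_mask = GENMASK(d->chan_nrs - 1, 0);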

...

> +	for (i = 0; i < d->port_nrs; i++) {
> +		p = &d->ports[i];
> +		p->portid = i;
> +		p->ldev = d;

> +		for (j = 0; j < d->chan_nrs && d->ver != DMA_VER22; j++)
> +			c = &d->chans[j];

What's going on here?

> +	}

...

> +	for (i = 0; i < d->chan_nrs; i++) {
> +		if (d->ver == DMA_VER22) {

Split...

> +			if (!(d->channels_mask & BIT(i)))
> +				continue;

...and obviously for_each_set_bit().

> +			c = &d->chans[i];
> +			c->nr = i; /* Real channel number */
> +			c->rst = DMA_CHAN_RST;
> +			snprintf(c->name, sizeof(c->name), "chan%d",
> +				 c->nr);
> +			INIT_WORK(&c->work, dma_work);
> +			c->vchan.desc_free = dma_free_desc_resource;
> +			vchan_init(&c->vchan, dma_dev);
> +		} else {
> +			c = &d->chans[i];
> +			c->data_endian = DMA_DFT_ENDIAN;
> +			c->desc_endian = DMA_DFT_ENDIAN;
> +			c->data_endian_en = false;
> +			c->desc_endian_en = false;
> +			c->desc_rx_np = false;
> +			c->flags |= DEVICE_ALLOC_DESC;
> +			c->onoff = DMA_CH_OFF;
> +			c->rst = DMA_CHAN_RST;
> +			c->abc_en = true;
> +			c->nr = i;
> +			c->vchan.desc_free = dma_free_desc_resource;
> +			vchan_init(&c->vchan, dma_dev);
> +		}
> +	}

...

> +static int __init intel_ldma_init(void)
> +{
> +	return platform_driver_register(&intel_ldma_driver);
> +}
> +
> +device_initcall(intel_ldma_init);

Each _initcall() in general should be explained.

...

> +#include <linux/dmaengine.h>

I don't see how it's used

struct dma_chan;

should be enough.

> +/*!
> + * \fn int intel_dma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base,
> + *                                 int desc_num)
> + * \brief Configure low level channel descriptors
> + * \param[in] chan   pointer to DMA channel that the client is using
> + * \param[in] desc_base   descriptor base physical address
> + * \param[in] desc_num   number of descriptors
> + * \return   0 on success
> + * \return   kernel bug reported on failure
> + *
> + * This function configures the low level channel descriptors. It will be
> + * used by CBM, whose descriptors are not in DDR but actually in registers.
> + */
> +int intel_dma_chan_desc_cfg(struct dma_chan *chan, dma_addr_t desc_base,
> +			    int desc_num);

-- 
With Best Regards,
Andy Shevchenko




* Re: [PATCH v6 1/2] dt-bindings: dma: Add bindings for intel LGM SOC
       [not found] ` <ff2de5b0e4cd414420d48377c7c97c45d71f6197.1599605765.git.mallikarjunax.reddy@linux.intel.com>
@ 2020-09-09 11:27   ` Vinod Koul
  0 siblings, 0 replies; 5+ messages in thread
From: Vinod Koul @ 2020-09-09 11:27 UTC (permalink / raw)
  To: Amireddy Mallikarjuna reddy
  Cc: dmaengine, devicetree, robh+dt, linux-kernel, andriy.shevchenko,
	chuanhua.lei, cheol.yong.kim, qi-ming.wu, malliamireddy009,
	peter.ujfalusi

Hi Amireddy,

On 09-09-20, 07:07, Amireddy Mallikarjuna reddy wrote:
> Add DT bindings YAML schema for DMA controller driver
> of Lightning Mountain(LGM) SoC.
> 
> Signed-off-by: Amireddy Mallikarjuna reddy <mallikarjunax.reddy@linux.intel.com>
> ---
> v1:
> - Initial version.
> 
> v2:
> - Fix bot errors.
> 
> v3:
> - No change.
> 
> v4:
> - Address Thomas langer comments
>   - use node name pattern as dma-controller as in common binding.
>   - Remove "_" (underscore) in instance name.
>   - Remove "port-" and "chan-" in attribute name for both 'dma-ports' & 'dma-channels' child nodes.
> 
> v5:
> - Moved some of the attributes in 'dma-ports' & 'dma-channels' child nodes to dma client/consumer side as cells in 'dmas' properties.
> 
> v6:
> - Add additionalProperties: false
> - completely removed 'dma-ports' and 'dma-channels' child nodes.
> - Moved channel dt properties to client side dmas.
> - Use standard dma-channels and dma-channel-mask properties.
> - Documented reset-names
> - Add description for dma-cells
> ---
>  .../devicetree/bindings/dma/intel,ldma.yaml        | 154 +++++++++++++++++++++
>  1 file changed, 154 insertions(+)
>  create mode 100644 Documentation/devicetree/bindings/dma/intel,ldma.yaml
> 
> diff --git a/Documentation/devicetree/bindings/dma/intel,ldma.yaml b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> new file mode 100644
> index 000000000000..4a2a12b829eb
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/dma/intel,ldma.yaml
> @@ -0,0 +1,154 @@
> +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
> +%YAML 1.2
> +---
> +$id: http://devicetree.org/schemas/dma/intel,ldma.yaml#
> +$schema: http://devicetree.org/meta-schemas/core.yaml#
> +
> +title: Lightning Mountain centralized low speed DMA and high speed DMA controllers.
> +
> +maintainers:
> +  - chuanhua.lei@intel.com
> +  - mallikarjunax.reddy@intel.com
> +
> +allOf:
> +  - $ref: "dma-controller.yaml#"
> +
> +properties:
> + $nodename:
> +   pattern: "^dma-controller(@.*)?$"
> +
> + compatible:
> +  enum:
> +   - intel,lgm-cdma
> +   - intel,lgm-dma2tx
> +   - intel,lgm-dma1rx
> +   - intel,lgm-dma1tx
> +   - intel,lgm-dma0tx
> +   - intel,lgm-dma3
> +   - intel,lgm-toe-dma30
> +   - intel,lgm-toe-dma31

Should this not say oneOf?

> +
> + reg:
> +  maxItems: 1
> +
> + "#dma-cells":
> +  const: 11

wow that is a big one

> +  description:
> +    The first and second cells are the channel and port IDs respectively.

What does port id mean?

Can you please describe each parameter

> +    The third and fourth cells are the per-channel data and descriptor endianness configuration respectively, according to the SoC requirement.

What does per-channel data refer to? Isn't this all little endian?

> +    The fifth cell is the per-channel byte offset (0~128).

Offset of?

> +    Sixth cell is per channel Write non-posted type for DMA RX last data beat of every descriptor.

I am not sure what this means.

> +    Seventh cell is per channel packet drop enabled or disabled.

Same here

> +    The eighth and ninth cells are the header mode size and the checksum enable/disable flag respectively.
> +       If checksum is enabled, the header mode size is ignored; if disabled, the header mode size must be provided.
> +    The last two cells are the per-channel DMA hardware descriptor configuration:
> +       the first is the descriptor physical address and the second is the hardware descriptor count.

Do you really use all these parameters, or are most of them filled with
defaults?

> +
> + dma-channels:
> +  minimum: 1
> +  maximum: 16
> +
> + dma-channel-mask:
> +  $ref: /schemas/types.yaml#/definitions/uint32-array

This is already defined in
Documentation/devicetree/bindings/dma/dma-common.yaml, no need to define
this here

> +  items:
> +    minItems: 1
> +    # Should be enough

> +
> + clocks:
> +  maxItems: 1
> +
> + resets:
> +  maxItems: 1
> +
> + reset-names:
> +  items:
> +    - const: ctrl
> +
> + interrupts:
> +  maxItems: 1
> +
> + intel,dma-poll-cnt:
> +   $ref: /schemas/types.yaml#definitions/uint32
> +   description:
> +     DMA descriptor polling counter. It may need fine-tuning according
> +     to the system application scenario.

What does this mean? How will the system application fine-tune it?

> +
> + intel,dma-byte-en:
> +   type: boolean
> +   description:
> +     DMA byte enable is only valid for DMA write(RX).
> +     Byte enable(1) means DMA write will be based on the number of dwords
> +     instead of the whole burst.

You already have this in #dma-cells, so why here? And if here, then why in
#dma-cells?

> +
> + intel,dma-drb:
> +    type: boolean
> +    description:
> +      DMA descriptor read back to make sure data and desc synchronization.

I think this is also in #dma-cells?

> +
> + intel,dma-burst:
> +    $ref: /schemas/types.yaml#definitions/uint32
> +    description:
> +       Specify the DMA burst size (in dwords); valid values are 8, 16 and 32.
> +       The default is 16 for the data path DMA and 32 for the memcopy DMA.

Burst should come from client, why is this here?

> +
> + intel,dma-polling-cnt:
> +    $ref: /schemas/types.yaml#definitions/uint32
> +    description:
> +       DMA descriptor polling counter. It may need fine-tuning according to
> +       the system application scenario.

What does this counter do?

> +
> + intel,dma-desc-in-sram:
> +    type: boolean
> +    description:
> +       DMA descriptors in SRAM or not. On some old controllers the descriptors
> +       can be in DRAM or SRAM; on the new ones they are all in SRAM.

What does this do?

> +
> + intel,dma-orrc:
> +    $ref: /schemas/types.yaml#definitions/uint32
> +    description:
> +       DMA outstanding read counter. The maximum value is 16, and it may
> +       need fine-tuning according to the system application scenarios.

What does this do?

> +
> + intel,dma-dburst-wr:
> +    type: boolean
> +    description:
> +       Enable RX dynamic burst write. It only applies to RX DMA and memcopy DMA.

What does this do?

> +
> +required:
> + - compatible
> + - reg
> + - '#dma-cells'
> +
> +additionalProperties: false
> +
> +examples:
> + - |
> +   dma0: dma-controller@e0e00000 {
> +     compatible = "intel,lgm-cdma";
> +     reg = <0xe0e00000 0x1000>;
> +     #dma-cells = <11>;
> +     dma-channels = <16>;
> +     dma-channel-mask = <0xFFFF>;
> +     interrupt-parent = <&ioapic1>;
> +     interrupts = <82 1>;
> +     resets = <&rcu0 0x30 0>;
> +     reset-names = "ctrl";
> +     clocks = <&cgu0 80>;
> +     intel,dma-poll-cnt = <4>;
> +     intel,dma-byte-en;
> +     intel,dma-drb;
> +   };
> + - |
> +   dma3: dma-controller@ec800000 {
> +     compatible = "intel,lgm-dma3";
> +     reg = <0xec800000 0x1000>;
> +     clocks = <&cgu0 71>;
> +     resets = <&rcu0 0x10 9>;
> +     #dma-cells = <11>;
> +     intel,dma-burst = <32>;
> +     intel,dma-polling-cnt = <16>;
> +     intel,dma-desc-in-sram;
> +     intel,dma-orrc = <16>;
> +     intel,dma-byte-en;
> +     intel,dma-dburst-wr;
> +   };
> -- 
> 2.11.0

-- 
~Vinod


* Re: [PATCH v6 2/2] Add Intel LGM soc DMA support.
       [not found]     ` <36a42016-3260-3933-bbf9-9203c4124115@linux.intel.com>
@ 2020-09-18 12:20       ` Andy Shevchenko
  2020-09-21 15:13         ` Reddy, MallikarjunaX
  0 siblings, 1 reply; 5+ messages in thread
From: Andy Shevchenko @ 2020-09-18 12:20 UTC (permalink / raw)
  To: Reddy, MallikarjunaX
  Cc: dmaengine, vkoul, devicetree, robh+dt, linux-kernel,
	chuanhua.lei, cheol.yong.kim, qi-ming.wu, malliamireddy009,
	peter.ujfalusi

On Fri, Sep 18, 2020 at 11:42:54AM +0800, Reddy, MallikarjunaX wrote:
> On 9/9/2020 7:14 PM, Andy Shevchenko wrote:
> > On Wed, Sep 09, 2020 at 07:07:34AM +0800, Amireddy Mallikarjuna reddy wrote:

...

> > > +	help
> > > +	  Enable support for intel Lightning Mountain SOC DMA controllers.
> > > +	  These controllers provide DMA capabilities for a variety of on-chip
> > > +	  devices such as SSC, HSNAND and GSWIP.
> > And how will the module be called?
>  are you expecting to include 'default y' ?

I'm expecting to see something like "if you choose M the module will be called
bla-foo-bar." Look at the existing examples in the kernel.

...

> > > +ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
> > > +{
> > > +	u32 old_val, new_val;
> > > +
> > > +	old_val = readl(d->base +  ofs);
> > > +	new_val = (old_val & ~mask) | (val & mask);
> > With bitfield.h you will have this as u32_replace_bits().
> -  new_val = (old_val & ~mask) | (val & mask);
> + new_val = old_val;
> + u32_replace_bits(new_val, val, mask);
> 
> I think in this function we can't use this because of compilation issues
> thrown by bitfield.h, which expects the 2nd and 3rd arguments to be
> constants, not variables.
> 
> ex:
> 	u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);

How come these are constants? Above you have a function which does a
read-modify-write on the register. It should be something like

	old = read();
	new = u32_replace_bits(old, ...);
	write(new);

> ./include/linux/bitfield.h:131:3: error: call to '__field_overflow' declared
> with attribute error: value doesn't fit into mask
>    __field_overflow();     \
>    ^~~~~~~~~~~~~~~~~~
> 
> ./include/linux/bitfield.h:119:3: error: call to '__bad_mask' declared with
> attribute error: bad bitfield mask
>    __bad_mask();
>    ^~~~~~~~~~~~

So, even with constants u32_replace_bits() must work. Maybe you didn't get how?

> > > +	if (new_val != old_val)
> > > +		writel(new_val, d->base + ofs);
> > > +}

...

> > > +	/* High 4 bits */
> > Why only 4?
> these are the upper 4 bits of the 36-bit addressing.

Make it clear in the comment.

...

> > > +device_initcall(intel_ldma_init);
> > Each _initcall() in general should be explained.
> ok. is it fine?
> 
> /* Perform this driver as device_initcall to make sure initialization happens
>  * before its dma clients of some are platform specific. make sure to provide
>  * registered dma channels and dma capabilities to client before their
>  * initialization.
>  */

/*
 * Just follow proper multi-line comment style.
 * And use dma -> DMA.
 */

-- 
With Best Regards,
Andy Shevchenko




* Re: [PATCH v6 2/2] Add Intel LGM soc DMA support.
  2020-09-18 12:20       ` Andy Shevchenko
@ 2020-09-21 15:13         ` Reddy, MallikarjunaX
  0 siblings, 0 replies; 5+ messages in thread
From: Reddy, MallikarjunaX @ 2020-09-21 15:13 UTC (permalink / raw)
  To: Andy Shevchenko
  Cc: dmaengine, vkoul, devicetree, robh+dt, linux-kernel,
	chuanhua.lei, cheol.yong.kim, qi-ming.wu, malliamireddy009,
	peter.ujfalusi

Hi Andy,
Thanks for your comments. My comments are inline.

On 9/18/2020 8:20 PM, Andy Shevchenko wrote:
> On Fri, Sep 18, 2020 at 11:42:54AM +0800, Reddy, MallikarjunaX wrote:
>> On 9/9/2020 7:14 PM, Andy Shevchenko wrote:
>>> On Wed, Sep 09, 2020 at 07:07:34AM +0800, Amireddy Mallikarjuna reddy wrote:
> ...
>
>>>> +	help
>>>> +	  Enable support for intel Lightning Mountain SOC DMA controllers.
>>>> +	  These controllers provide DMA capabilities for a variety of on-chip
>>>> +	  devices such as SSC, HSNAND and GSWIP.
>>> And how will the module be called?
>>   are you expecting to include 'default y' ?
> I'm expecting to see something like "if you choose M the module will be called
> bla-foo-bar." Look at the existing examples in the kernel.
OK, I will change bool to tristate.
>
> ...
>
>>>> +ldma_update_bits(struct ldma_dev *d, u32 mask, u32 val, u32 ofs)
>>>> +{
>>>> +	u32 old_val, new_val;
>>>> +
>>>> +	old_val = readl(d->base +  ofs);
>>>> +	new_val = (old_val & ~mask) | (val & mask);
>>> With bitfield.h you will have this as u32_replace_bits().
>> -  new_val = (old_val & ~mask) | (val & mask);
>> + new_val = old_val;
>> + u32_replace_bits(new_val, val, mask);
>>
>> I think in this function we can't use this because of compilation issues
>> thrown by bitfield.h, which expects the 2nd and 3rd arguments to be
>> constants, not variables.
>>
>> ex:
>> 	u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
> How comes these are constants? In the above you have a function which does
> r-m-w approach to the register. It should be something like
>
> 	old = read();
> 	new = u32_replace_bits(old, ...);
> 	write(new);
>
>> ./include/linux/bitfield.h:131:3: error: call to '__field_overflow' declared
>> with attribute error: value doesn't fit into mask
>>     __field_overflow();     \
>>     ^~~~~~~~~~~~~~~~~~
>>
>> ./include/linux/bitfield.h:119:3: error: call to '__bad_mask' declared with
>> attribute error: bad bitfield mask
>>     __bad_mask();
>>     ^~~~~~~~~~~~
> So, even with constants u32_replace_bits() must work. Maybe you didn't get how?

Thanks Andy, now I know how u32_replace_bits() works.

The mask is not a contiguous set of bits in some cases. Because the mask bits
are not contiguous, u32_replace_bits() can't be used here.
Ex:
        u32 mask = DMA_CPOLL_EN | DMA_CPOLL_CNT;

As for the __field_overflow error: the update bits in 'val' are already
aligned with the mask bits, so inside u32_replace_bits() 'val' exceeds the
'mask' in some cases, which causes the __field_overflow error.

>>>> +	if (new_val != old_val)
>>>> +		writel(new_val, d->base + ofs);
>>>> +}
> ...
>
>>>> +	/* High 4 bits */
>>> Why only 4?
>> these are the upper 4 bits of the 36-bit addressing.
> Make it clear in the comment.
ok.
>
> ...
>
>>>> +device_initcall(intel_ldma_init);
>>> Each _initcall() in general should be explained.
>> ok. is it fine?
>>
>> /* Perform this driver as device_initcall to make sure initialization happens
>>  * before its dma clients of some are platform specific. make sure to provide
>>  * registered dma channels and dma capabilities to client before their
>>  * initialization.
>>  */
> /*
>   * Just follow proper multi-line comment style.
>   * And use dma -> DMA.
>   */
Ok.


Thread overview: 5 messages
2020-09-08 23:07 [PATCH v6 0/2] Add Intel LGM soc DMA support Amireddy Mallikarjuna reddy
     [not found] ` <748370a51af0ab768e542f1537d1aa3aeefebe8a.1599605765.git.mallikarjunax.reddy@linux.intel.com>
2020-09-09 11:14   ` [PATCH v6 2/2] " Andy Shevchenko
     [not found]     ` <36a42016-3260-3933-bbf9-9203c4124115@linux.intel.com>
2020-09-18 12:20       ` Andy Shevchenko
2020-09-21 15:13         ` Reddy, MallikarjunaX
     [not found] ` <ff2de5b0e4cd414420d48377c7c97c45d71f6197.1599605765.git.mallikarjunax.reddy@linux.intel.com>
2020-09-09 11:27   ` [PATCH v6 1/2] dt-bindings: dma: Add bindings for intel LGM SOC Vinod Koul
