devicetree.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Vinod Koul <vkoul@kernel.org>
To: Peter Ujfalusi <peter.ujfalusi@ti.com>
Cc: robh+dt@kernel.org, nm@ti.com, ssantosh@kernel.org,
	dan.j.williams@intel.com, dmaengine@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, devicetree@vger.kernel.org,
	linux-kernel@vger.kernel.org, grygorii.strashko@ti.com,
	lokeshvutla@ti.com, t-kristo@ti.com, tony@atomide.com,
	j-keerthy@ti.com
Subject: Re: [PATCH v4 11/15] dmaengine: ti: New driver for K3 UDMA - split#3: alloc/free chan_resources
Date: Mon, 11 Nov 2019 11:36:40 +0530	[thread overview]
Message-ID: <20191111060625.GP952516@vkoul-mobl> (raw)
In-Reply-To: <20191101084135.14811-12-peter.ujfalusi@ti.com>

On 01-11-19, 10:41, Peter Ujfalusi wrote:
> Split patch for review containing: channel rsource allocation and free

s/rsource/resource


> +static int udma_tisci_tx_channel_config(struct udma_chan *uc)
> +{
> +	struct udma_dev *ud = uc->ud;
> +	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
> +	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
> +	struct udma_tchan *tchan = uc->tchan;
> +	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
> +	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
> +	u32 mode, fetch_size;
> +	int ret = 0;
> +
> +	if (uc->pkt_mode) {
> +		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
> +		fetch_size = cppi5_hdesc_calc_size(uc->needs_epib, uc->psd_size,
> +						   0);
> +	} else {
> +		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
> +		fetch_size = sizeof(struct cppi5_desc_hdr_t);
> +	}
> +
> +	req_tx.valid_params =
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;

bunch of these are repeated, you can define a COMMON_VALID_PARAMS and use
that + specific ones..

> +
> +	req_tx.nav_id = tisci_rm->tisci_dev_id;
> +	req_tx.index = tchan->id;
> +	req_tx.tx_pause_on_err = 0;
> +	req_tx.tx_filt_einfo = 0;
> +	req_tx.tx_filt_pswords = 0;

I think initialization to 0 is superfluous

> +	req_tx.tx_chan_type = mode;
> +	req_tx.tx_supr_tdpkt = uc->notdpkt;
> +	req_tx.tx_fetch_size = fetch_size >> 2;
> +	req_tx.txcq_qnum = tc_ring;
> +	if (uc->ep_type == PSIL_EP_PDMA_XY) {
> +		/* wait for peer to complete the teardown for PDMAs */
> +		req_tx.valid_params |=
> +				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
> +		req_tx.tx_tdtype = 1;
> +	}
> +
> +	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
> +	if (ret)
> +		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
> +
> +	return ret;
> +}
> +
> +static int udma_tisci_rx_channel_config(struct udma_chan *uc)
> +{
> +	struct udma_dev *ud = uc->ud;
> +	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
> +	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
> +	struct udma_rchan *rchan = uc->rchan;
> +	int fd_ring = k3_ringacc_get_ring_id(rchan->fd_ring);
> +	int rx_ring = k3_ringacc_get_ring_id(rchan->r_ring);
> +	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
> +	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
> +	u32 mode, fetch_size;
> +	int ret = 0;
> +
> +	if (uc->pkt_mode) {
> +		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
> +		fetch_size = cppi5_hdesc_calc_size(uc->needs_epib,
> +							uc->psd_size, 0);
> +	} else {
> +		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
> +		fetch_size = sizeof(struct cppi5_desc_hdr_t);
> +	}
> +
> +	req_rx.valid_params =
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
> +
> +	req_rx.nav_id = tisci_rm->tisci_dev_id;
> +	req_rx.index = rchan->id;
> +	req_rx.rx_fetch_size =  fetch_size >> 2;
> +	req_rx.rxcq_qnum = rx_ring;
> +	req_rx.rx_pause_on_err = 0;
> +	req_rx.rx_chan_type = mode;
> +	req_rx.rx_ignore_short = 0;
> +	req_rx.rx_ignore_long = 0;
> +	req_rx.flowid_start = 0;
> +	req_rx.flowid_cnt = 0;
> +
> +	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
> +	if (ret) {
> +		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
> +		return ret;
> +	}
> +
> +	flow_req.valid_params =
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
> +		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
> +
> +	flow_req.nav_id = tisci_rm->tisci_dev_id;
> +	flow_req.flow_index = rchan->id;
> +
> +	if (uc->needs_epib)
> +		flow_req.rx_einfo_present = 1;
> +	else
> +		flow_req.rx_einfo_present = 0;
> +	if (uc->psd_size)
> +		flow_req.rx_psinfo_present = 1;
> +	else
> +		flow_req.rx_psinfo_present = 0;
> +	flow_req.rx_error_handling = 1;
> +	flow_req.rx_desc_type = 0;
> +	flow_req.rx_dest_qnum = rx_ring;
> +	flow_req.rx_src_tag_hi_sel = 2;
> +	flow_req.rx_src_tag_lo_sel = 4;
> +	flow_req.rx_dest_tag_hi_sel = 5;
> +	flow_req.rx_dest_tag_lo_sel = 4;

can we get rid of magic numbers here and elsewhere, or at least comment
on what these mean..

> +static int udma_alloc_chan_resources(struct dma_chan *chan)
> +{
> +	struct udma_chan *uc = to_udma_chan(chan);
> +	struct udma_dev *ud = to_udma_dev(chan->device);
> +	const struct udma_match_data *match_data = ud->match_data;
> +	struct k3_ring *irq_ring;
> +	u32 irq_udma_idx;
> +	int ret;
> +
> +	if (uc->pkt_mode || uc->dir == DMA_MEM_TO_MEM) {
> +		uc->use_dma_pool = true;
> +		/* in case of MEM_TO_MEM we have maximum of two TRs */
> +		if (uc->dir == DMA_MEM_TO_MEM) {
> +			uc->hdesc_size = cppi5_trdesc_calc_size(
> +					sizeof(struct cppi5_tr_type15_t), 2);
> +			uc->pkt_mode = false;
> +		}
> +	}
> +
> +	if (uc->use_dma_pool) {
> +		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
> +						 uc->hdesc_size, ud->desc_align,
> +						 0);
> +		if (!uc->hdesc_pool) {
> +			dev_err(ud->ddev.dev,
> +				"Descriptor pool allocation failed\n");
> +			uc->use_dma_pool = false;
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	/*
> +	 * Make sure that the completion is in a known state:
> +	 * No teardown, the channel is idle
> +	 */
> +	reinit_completion(&uc->teardown_completed);
> +	complete_all(&uc->teardown_completed);

should we not complete first and then do reinit to bring a clean state?

> +	uc->state = UDMA_CHAN_IS_IDLE;
> +
> +	switch (uc->dir) {
> +	case DMA_MEM_TO_MEM:

can you explain why an allocation should be channel dependent, shouldn't
these things be done in prep_ calls?

I looked ahead and checked the prep_ calls and we can use any direction
so this somehow doesn't make sense!
-- 
~Vinod

  reply	other threads:[~2019-11-11  6:06 UTC|newest]

Thread overview: 55+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-11-01  8:41 [PATCH v4 00/15] dmaengine/soc: Add Texas Instruments UDMA support Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 01/15] bindings: soc: ti: add documentation for k3 ringacc Peter Ujfalusi
2019-11-11  4:07   ` Vinod Koul
2019-11-11  7:24     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 02/15] soc: ti: k3: add navss ringacc driver Peter Ujfalusi
2019-11-11  4:21   ` Vinod Koul
2019-11-11  7:39     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 03/15] dmaengine: doc: Add sections for per descriptor metadata support Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 04/15] dmaengine: Add metadata_ops for dma_async_tx_descriptor Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 05/15] dmaengine: Add support for reporting DMA cached data amount Peter Ujfalusi
2019-11-11  4:39   ` Vinod Koul
2019-11-11  8:00     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 06/15] dmaengine: ti: Add cppi5 header for K3 NAVSS/UDMA Peter Ujfalusi
2019-11-05  7:40   ` Tero Kristo
2019-11-01  8:41 ` [PATCH v4 07/15] dmaengine: ti: k3 PSI-L remote endpoint configuration Peter Ujfalusi
2019-11-05  7:49   ` Tero Kristo
2019-11-05  8:13     ` Peter Ujfalusi
2019-11-05 10:00   ` Grygorii Strashko
2019-11-05 10:27     ` Peter Ujfalusi
2019-11-05 11:25       ` Grygorii Strashko
2019-11-11  4:47   ` Vinod Koul
2019-11-11  8:47     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 08/15] dt-bindings: dma: ti: Add document for K3 UDMA Peter Ujfalusi
2019-11-05  2:19   ` Rob Herring
2019-11-05 10:08     ` Peter Ujfalusi
2019-11-14 17:53       ` Rob Herring
2019-11-15  9:45         ` Peter Ujfalusi
2019-11-26  8:29           ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 09/15] dmaengine: ti: New driver for K3 UDMA - split#1: defines, structs, io func Peter Ujfalusi
2019-11-11  5:28   ` Vinod Koul
2019-11-11  8:33     ` Peter Ujfalusi
2019-11-11  9:00       ` Vinod Koul
2019-11-11  9:12         ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 10/15] dmaengine: ti: New driver for K3 UDMA - split#2: probe/remove, xlate and filter_fn Peter Ujfalusi
2019-11-11  5:33   ` Vinod Koul
2019-11-11  9:16     ` Peter Ujfalusi
2019-11-12  5:34       ` Vinod Koul
2019-11-12  7:22         ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 11/15] dmaengine: ti: New driver for K3 UDMA - split#3: alloc/free chan_resources Peter Ujfalusi
2019-11-11  6:06   ` Vinod Koul [this message]
2019-11-11  9:40     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 12/15] dmaengine: ti: New driver for K3 UDMA - split#4: dma_device callbacks 1 Peter Ujfalusi
2019-11-11  6:09   ` Vinod Koul
2019-11-11 10:29     ` Peter Ujfalusi
2019-11-12  5:36       ` Vinod Koul
2019-11-12  7:24         ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 13/15] dmaengine: ti: New driver for K3 UDMA - split#5: dma_device callbacks 2 Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 14/15] dmaengine: ti: New driver for K3 UDMA - split#6: Kconfig and Makefile Peter Ujfalusi
2019-11-11  6:11   ` Vinod Koul
2019-11-11 10:30     ` Peter Ujfalusi
2019-11-01  8:41 ` [PATCH v4 15/15] dmaengine: ti: k3-udma: Add glue layer for non DMAengine users Peter Ujfalusi
2019-11-11  6:12   ` Vinod Koul
2019-11-11 10:31     ` Peter Ujfalusi
2019-11-12  5:37       ` Vinod Koul
2019-11-12  7:25         ` Peter Ujfalusi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20191111060625.GP952516@vkoul-mobl \
    --to=vkoul@kernel.org \
    --cc=dan.j.williams@intel.com \
    --cc=devicetree@vger.kernel.org \
    --cc=dmaengine@vger.kernel.org \
    --cc=grygorii.strashko@ti.com \
    --cc=j-keerthy@ti.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lokeshvutla@ti.com \
    --cc=nm@ti.com \
    --cc=peter.ujfalusi@ti.com \
    --cc=robh+dt@kernel.org \
    --cc=ssantosh@kernel.org \
    --cc=t-kristo@ti.com \
    --cc=tony@atomide.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).