From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Return-Path: From: Peter Griffin Subject: [PATCH v8 05/18] dmaengine: st_fdma: Add STMicroelectronics FDMA engine driver support Date: Fri, 26 Aug 2016 15:56:40 +0100 Message-Id: <1472223413-7254-6-git-send-email-peter.griffin@linaro.org> In-Reply-To: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> References: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> To: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kernel@stlinux.com, vinod.koul@intel.com, patrice.chotard@st.com, dan.j.williams@intel.com, airlied@linux.ie, kraxel@redhat.com, ohad@wizery.com, bjorn.andersson@linaro.org Cc: peter.griffin@linaro.org, lee.jones@linaro.org, dmaengine@vger.kernel.org, devicetree@vger.kernel.org, dri-devel@lists.freedesktop.org, virtualization@lists.linux-foundation.org, linux-remoteproc@vger.kernel.org, Ludovic Barre List-ID: This patch adds support for the Flexible Direct Memory Access (FDMA) core driver. The FDMA is a slim core CPU with a dedicated firmware. It is a general purpose DMA controller capable of supporting 16 independent DMA channels. Data moves maybe from memory to memory or between memory and paced latency critical real time targets and it is found on al STi based chipsets. 
Signed-off-by: Ludovic Barre Signed-off-by: Peter Griffin --- drivers/dma/Kconfig | 14 +- drivers/dma/Makefile | 1 + drivers/dma/st_fdma.c | 880 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 894 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/st_fdma.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 739f797..5b5a341 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -437,6 +437,19 @@ config STE_DMA40 help Support for ST-Ericsson DMA40 controller +config ST_FDMA + tristate "ST FDMA dmaengine support" + depends on ARCH_STI + select ST_SLIM_REMOTEPROC + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for ST FDMA controller. + It supports 16 independent DMA channels, accepts up to 32 DMA requests + + Say Y here if you have such a chipset. + If unsure, say N. + config STM32_DMA bool "STMicroelectronics STM32 DMA support" depends on ARCH_STM32 @@ -567,7 +580,6 @@ config ZX_DMA help Support the DMA engine for ZTE ZX296702 platform devices. 
- # driver files source "drivers/dma/bestcomm/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index e4dc9ca..a4fa336 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o obj-$(CONFIG_ZX_DMA) += zx296702_dma.o +obj-$(CONFIG_ST_FDMA) += st_fdma.o obj-y += qcom/ obj-y += xilinx/ diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c new file mode 100644 index 0000000..bb8d8a7 --- /dev/null +++ b/drivers/dma/st_fdma.c @@ -0,0 +1,880 @@ +/* + * st_fdma.c + * + * Copyright (C) 2014 STMicroelectronics + * Author: Ludovic Barre + * Peter Griffin + * License terms: GNU General Public License (GPL), version 2 + */ +#include +#include +#include +#include +#include +#include +#include + +#include "st_fdma.h" + +static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c) +{ + return container_of(c, struct st_fdma_chan, vchan.chan); +} + +static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct st_fdma_desc, vdesc); +} + +static int st_fdma_dreq_get(struct st_fdma_chan *fchan) +{ + struct st_fdma_dev *fdev = fchan->fdev; + u32 req_line_cfg = fchan->cfg.req_line; + u32 dreq_line; + int try = 0; + + /* + * dreq_mask is shared for n channels of fdma, so all accesses must be + * atomic. 
if the dreq_mask is changed between ffz and set_bit, + * we retry + */ + do { + if (fdev->dreq_mask == ~0L) { + dev_err(fdev->dev, "No req lines available\n"); + return -EINVAL; + } + + if (try || req_line_cfg >= ST_FDMA_NR_DREQS) { + dev_err(fdev->dev, "Invalid or used req line\n"); + return -EINVAL; + } else { + dreq_line = req_line_cfg; + } + + try++; + } while (test_and_set_bit(dreq_line, &fdev->dreq_mask)); + + dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n", + dreq_line, fdev->dreq_mask); + + return dreq_line; +} + +static void st_fdma_dreq_put(struct st_fdma_chan *fchan) +{ + struct st_fdma_dev *fdev = fchan->fdev; + + dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line); + clear_bit(fchan->dreq_line, &fdev->dreq_mask); +} + +static void st_fdma_xfer_desc(struct st_fdma_chan *fchan) +{ + struct virt_dma_desc *vdesc; + unsigned long nbytes, ch_cmd, cmd; + + vdesc = vchan_next_desc(&fchan->vchan); + if (!vdesc) + return; + + fchan->fdesc = to_st_fdma_desc(vdesc); + nbytes = fchan->fdesc->node[0].desc->nbytes; + cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id); + ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START; + + /* start the channel for the descriptor */ + fnode_write(fchan, nbytes, FDMA_CNTN_OFST); + fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST); + writel(cmd, + fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST); + + dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id); +} + +static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan, + unsigned long int_sta) +{ + unsigned long ch_sta, ch_err; + int ch_id = fchan->vchan.chan.chan_id; + struct st_fdma_dev *fdev = fchan->fdev; + + ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST); + ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK; + ch_sta &= FDMA_CH_CMD_STA_MASK; + + if (int_sta & FDMA_INT_STA_ERR) { + dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err); + fchan->status = DMA_ERROR; + return; + } + + switch (ch_sta) { + case FDMA_CH_CMD_STA_PAUSED: + fchan->status = 
DMA_PAUSED; + break; + + case FDMA_CH_CMD_STA_RUNNING: + fchan->status = DMA_IN_PROGRESS; + break; + } +} + +static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id) +{ + struct st_fdma_dev *fdev = dev_id; + irqreturn_t ret = IRQ_NONE; + struct st_fdma_chan *fchan = &fdev->chans[0]; + unsigned long int_sta, clr; + + int_sta = fdma_read(fdev, FDMA_INT_STA_OFST); + clr = int_sta; + + for (; int_sta != 0 ; int_sta >>= 2, fchan++) { + if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR))) + continue; + + spin_lock(&fchan->vchan.lock); + st_fdma_ch_sta_update(fchan, int_sta); + + if (fchan->fdesc) { + if (!fchan->fdesc->iscyclic) { + list_del(&fchan->fdesc->vdesc.node); + vchan_cookie_complete(&fchan->fdesc->vdesc); + fchan->fdesc = NULL; + fchan->status = DMA_COMPLETE; + } else { + vchan_cyclic_callback(&fchan->fdesc->vdesc); + } + + /* Start the next descriptor (if available) */ + if (!fchan->fdesc) + st_fdma_xfer_desc(fchan); + } + + spin_unlock(&fchan->vchan.lock); + ret = IRQ_HANDLED; + } + + fdma_write(fdev, clr, FDMA_INT_CLR_OFST); + + return ret; +} + +static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct st_fdma_dev *fdev = ofdma->of_dma_data; + struct dma_chan *chan; + struct st_fdma_chan *fchan; + int ret; + + if (dma_spec->args_count < 1) + return ERR_PTR(-EINVAL); + + if (fdev->dma_device.dev->of_node != dma_spec->np) + return ERR_PTR(-EINVAL); + + ret = rproc_boot(fdev->slim_rproc->rproc); + if (ret == -ENOENT) + return ERR_PTR(-EPROBE_DEFER); + else if (ret) + return ERR_PTR(ret); + + chan = dma_get_any_slave_channel(&fdev->dma_device); + if (!chan) + goto err_chan; + + fchan = to_st_fdma_chan(chan); + + fchan->cfg.of_node = dma_spec->np; + fchan->cfg.req_line = dma_spec->args[0]; + fchan->cfg.req_ctrl = 0; + fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN; + + if (dma_spec->args_count > 1) + fchan->cfg.req_ctrl = dma_spec->args[1] + & FDMA_REQ_CTRL_CFG_MASK; + + if (dma_spec->args_count > 2) + 
fchan->cfg.type = dma_spec->args[2]; + + if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) { + fchan->dreq_line = 0; + } else { + fchan->dreq_line = st_fdma_dreq_get(fchan); + if (IS_ERR_VALUE(fchan->dreq_line)) { + chan = ERR_PTR(fchan->dreq_line); + goto err_chan; + } + } + + dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n", + fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl); + + return chan; + +err_chan: + rproc_shutdown(fdev->slim_rproc->rproc); + return chan; + +} + +static void st_fdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct st_fdma_desc *fdesc; + int i; + + fdesc = to_st_fdma_desc(vdesc); + for (i = 0; i < fdesc->n_nodes; i++) + dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc, + fdesc->node[i].pdesc); + kfree(fdesc); +} + +static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan, + int sg_len) +{ + struct st_fdma_desc *fdesc; + int i; + + fdesc = kzalloc(sizeof(*fdesc) + + sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT); + if (!fdesc) + return NULL; + + fdesc->fchan = fchan; + fdesc->n_nodes = sg_len; + for (i = 0; i < sg_len; i++) { + fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool, + GFP_NOWAIT, &fdesc->node[i].pdesc); + if (!fdesc->node[i].desc) + goto err; + } + return fdesc; + +err: + while (--i >= 0) + dma_pool_free(fchan->node_pool, fdesc->node[i].desc, + fdesc->node[i].pdesc); + kfree(fdesc); + return NULL; +} + +static int st_fdma_alloc_chan_res(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + + /* Create the dma pool for descriptor allocation */ + fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device), + fchan->fdev->dev, + sizeof(struct st_fdma_hw_node), + __alignof__(struct st_fdma_hw_node), + 0); + + if (!fchan->node_pool) { + dev_err(fchan->fdev->dev, "unable to allocate desc pool\n"); + return -ENOMEM; + } + + dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n", + fchan->vchan.chan.chan_id, fchan->cfg.type); + + return 0; +} + 
+static void st_fdma_free_chan_res(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + struct rproc *rproc = fchan->fdev->slim_rproc->rproc; + unsigned long flags; + + LIST_HEAD(head); + + dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", + __func__, fchan->vchan.chan.chan_id); + + if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN) + st_fdma_dreq_put(fchan); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + fchan->fdesc = NULL; + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + dma_pool_destroy(fchan->node_pool); + fchan->node_pool = NULL; + memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg)); + + rproc_shutdown(rproc); +} + +static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + struct st_fdma_hw_node *hw_node; + + if (!len) + return NULL; + + fchan = to_st_fdma_chan(chan); + + /* We only require a single descriptor */ + fdesc = st_fdma_alloc_desc(fchan, 1); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + hw_node = fdesc->node[0].desc; + hw_node->next = 0; + hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN; + hw_node->control |= FDMA_NODE_CTRL_SRC_INCR; + hw_node->control |= FDMA_NODE_CTRL_DST_INCR; + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + hw_node->nbytes = len; + hw_node->saddr = src; + hw_node->daddr = dst; + hw_node->generic.length = len; + hw_node->generic.sstride = 0; + hw_node->generic.dstride = 0; + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static int config_reqctrl(struct st_fdma_chan *fchan, + enum dma_transfer_direction direction) +{ + u32 maxburst = 0, addr = 0; + enum dma_slave_buswidth width; + int ch_id = fchan->vchan.chan.chan_id; + struct st_fdma_dev *fdev = fchan->fdev; + + switch (direction) { + + case DMA_DEV_TO_MEM: + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR; + maxburst = 
fchan->scfg.src_maxburst; + width = fchan->scfg.src_addr_width; + addr = fchan->scfg.src_addr; + break; + + case DMA_MEM_TO_DEV: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR; + maxburst = fchan->scfg.dst_maxburst; + width = fchan->scfg.dst_addr_width; + addr = fchan->scfg.dst_addr; + break; + + default: + return -EINVAL; + } + + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK; + + switch (width) { + + case DMA_SLAVE_BUSWIDTH_1_BYTE: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1; + break; + + case DMA_SLAVE_BUSWIDTH_2_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2; + break; + + case DMA_SLAVE_BUSWIDTH_4_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4; + break; + + case DMA_SLAVE_BUSWIDTH_8_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8; + break; + + default: + return -EINVAL; + } + + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK; + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1); + dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST); + + fchan->cfg.dev_addr = addr; + fchan->cfg.dir = direction; + + dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n", + ch_id, addr, fchan->cfg.req_ctrl); + + return 0; +} + +static void fill_hw_node(struct st_fdma_hw_node *hw_node, + struct st_fdma_chan *fchan, + enum dma_transfer_direction direction) +{ + if (direction == DMA_MEM_TO_DEV) { + hw_node->control |= FDMA_NODE_CTRL_SRC_INCR; + hw_node->control |= FDMA_NODE_CTRL_DST_STATIC; + hw_node->daddr = fchan->cfg.dev_addr; + } else { + hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC; + hw_node->control |= FDMA_NODE_CTRL_DST_INCR; + hw_node->saddr = fchan->cfg.dev_addr; + } + + hw_node->generic.sstride = 0; + hw_node->generic.dstride = 0; +} + +static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan, + size_t len, enum dma_transfer_direction direction) +{ + struct st_fdma_chan *fchan; + + if (!chan || !len) + return NULL; + + fchan = to_st_fdma_chan(chan); + + if 
(!is_slave_direction(direction)) { + dev_err(fchan->fdev->dev, "bad direction?\n"); + return NULL; + } + + return fchan; +} + +static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + int sg_len, i; + + fchan = st_fdma_prep_common(chan, len, direction); + if (!fchan) + return NULL; + + if (!period_len) + return NULL; + + if (config_reqctrl(fchan, direction)) { + dev_err(fchan->fdev->dev, "bad width or direction\n"); + return NULL; + } + + /* the buffer length must be a multiple of period_len */ + if (len % period_len != 0) { + dev_err(fchan->fdev->dev, "len is not multiple of period\n"); + return NULL; + } + + sg_len = len / period_len; + fdesc = st_fdma_alloc_desc(fchan, sg_len); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + fdesc->iscyclic = true; + + for (i = 0; i < sg_len; i++) { + struct st_fdma_hw_node *hw_node = fdesc->node[i].desc; + + hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc; + + hw_node->control = + FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line); + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + + fill_hw_node(hw_node, fchan, direction); + + if (direction == DMA_MEM_TO_DEV) + hw_node->saddr = buf_addr + (i * period_len); + else + hw_node->daddr = buf_addr + (i * period_len); + + hw_node->nbytes = period_len; + hw_node->generic.length = period_len; + } + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + struct st_fdma_hw_node *hw_node; + struct scatterlist *sg; + int i; + + fchan = 
st_fdma_prep_common(chan, sg_len, direction); + if (!fchan) + return NULL; + + if (!sgl) + return NULL; + + fdesc = st_fdma_alloc_desc(fchan, sg_len); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + fdesc->iscyclic = false; + + for_each_sg(sgl, sg, sg_len, i) { + hw_node = fdesc->node[i].desc; + + hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc; + hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line); + + fill_hw_node(hw_node, fchan, direction); + + if (direction == DMA_MEM_TO_DEV) + hw_node->saddr = sg_dma_address(sg); + else + hw_node->daddr = sg_dma_address(sg); + + hw_node->nbytes = sg_dma_len(sg); + hw_node->generic.length = sg_dma_len(sg); + } + + /* interrupt at end of last node */ + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan, + struct virt_dma_desc *vdesc, + bool in_progress) +{ + struct st_fdma_desc *fdesc = fchan->fdesc; + size_t residue = 0; + dma_addr_t cur_addr = 0; + int i; + + if (in_progress) { + cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST); + cur_addr &= FDMA_CH_CMD_DATA_MASK; + } + + for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) { + if (cur_addr == fdesc->node[i].pdesc) { + residue += fnode_read(fchan, FDMA_CNTN_OFST); + break; + } + residue += fdesc->node[i].desc->nbytes; + } + + return residue; +} + +static enum dma_status st_fdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&fchan->vchan.lock, flags); + vd = vchan_find_desc(&fchan->vchan, cookie); + if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie) + txstate->residue = 
st_fdma_desc_residue(fchan, vd, true); + else if (vd) + txstate->residue = st_fdma_desc_residue(fchan, vd, false); + else + txstate->residue = 0; + + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return ret; +} + +static void st_fdma_issue_pending(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&fchan->vchan.lock, flags); + + if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc) + st_fdma_xfer_desc(fchan); + + spin_unlock_irqrestore(&fchan->vchan.lock, flags); +} + +static int st_fdma_pause(struct dma_chan *chan) +{ + unsigned long flags; + LIST_HEAD(head); + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + unsigned long cmd = FDMA_CMD_PAUSE(ch_id); + + dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + if (fchan->fdesc) + fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST); + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return 0; +} + +static int st_fdma_resume(struct dma_chan *chan) +{ + unsigned long flags; + unsigned long val; + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + + dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + if (fchan->fdesc) { + val = fchan_read(fchan, FDMA_CH_CMD_OFST); + val &= FDMA_CH_CMD_DATA_MASK; + fchan_write(fchan, val, FDMA_CH_CMD_OFST); + } + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return 0; +} + +static int st_fdma_terminate_all(struct dma_chan *chan) +{ + unsigned long flags; + LIST_HEAD(head); + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + unsigned long cmd = FDMA_CMD_PAUSE(ch_id); + + dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST); + fchan->fdesc = NULL; + 
vchan_get_all_descriptors(&fchan->vchan, &head); + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + vchan_dma_desc_free_list(&fchan->vchan, &head); + + return 0; +} + +static int st_fdma_slave_config(struct dma_chan *chan, + struct dma_slave_config *slave_cfg) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + + memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg)); + return 0; +} + +static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = { + .name = "STiH407", + .id = 0, +}; + +static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = { + .name = "STiH407", + .id = 1, +}; + +static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = { + .name = "STiH407", + .id = 2, +}; + +static const struct of_device_id st_fdma_match[] = { + { .compatible = "st,stih407-fdma-mpe31-11" + , .data = &fdma_mpe31_stih407_11 }, + { .compatible = "st,stih407-fdma-mpe31-12" + , .data = &fdma_mpe31_stih407_12 }, + { .compatible = "st,stih407-fdma-mpe31-13" + , .data = &fdma_mpe31_stih407_13 }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_fdma_match); + +static int st_fdma_parse_dt(struct platform_device *pdev, + const struct st_fdma_driverdata *drvdata, + struct st_fdma_dev *fdev) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + + if (!np) + goto err; + + ret = of_property_read_u32(np, "dma-channels", &fdev->nr_channels); + if (ret) + goto err; + + snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf", + drvdata->name, drvdata->id); + +err: + return ret; +} +#define FDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static int st_fdma_probe(struct platform_device *pdev) +{ + struct st_fdma_dev *fdev; + const struct of_device_id *match; + struct device_node *np = pdev->dev.of_node; + const struct st_fdma_driverdata *drvdata; + int ret, i; + + match = of_match_device((st_fdma_match), &pdev->dev); + if (!match || !match->data) { + 
dev_err(&pdev->dev, "No device match found\n"); + return -ENODEV; + } + + drvdata = match->data; + + fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL); + if (!fdev) + return -ENOMEM; + + ret = st_fdma_parse_dt(pdev, drvdata, fdev); + if (ret) { + dev_err(&pdev->dev, "unable to find platform data\n"); + goto err; + } + + fdev->chans = devm_kzalloc(&pdev->dev, + fdev->nr_channels + * sizeof(struct st_fdma_chan), GFP_KERNEL); + if (!fdev->chans) + return -ENOMEM; + + fdev->dev = &pdev->dev; + fdev->drvdata = drvdata; + platform_set_drvdata(pdev, fdev); + + fdev->irq = platform_get_irq(pdev, 0); + if (fdev->irq < 0) { + dev_err(&pdev->dev, "Failed to get irq resource\n"); + return -EINVAL; + } + + ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0, + dev_name(&pdev->dev), fdev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret); + goto err; + } + + fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name); + if (!fdev->slim_rproc) { + ret = PTR_ERR(fdev->slim_rproc); + dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret); + goto err; + } + + /* Initialise list of FDMA channels */ + INIT_LIST_HEAD(&fdev->dma_device.channels); + for (i = 0; i < fdev->nr_channels; i++) { + struct st_fdma_chan *fchan = &fdev->chans[i]; + + fchan->fdev = fdev; + fchan->vchan.desc_free = st_fdma_free_desc; + vchan_init(&fchan->vchan, &fdev->dma_device); + } + + /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */ + fdev->dreq_mask = BIT(0) | BIT(31); + + dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask); + dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask); + + fdev->dma_device.dev = &pdev->dev; + fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res; + fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res; + fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic; + fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg; + 
fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy; + fdev->dma_device.device_tx_status = st_fdma_tx_status; + fdev->dma_device.device_issue_pending = st_fdma_issue_pending; + fdev->dma_device.device_terminate_all = st_fdma_terminate_all; + fdev->dma_device.device_config = st_fdma_slave_config; + fdev->dma_device.device_pause = st_fdma_pause; + fdev->dma_device.device_resume = st_fdma_resume; + + fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS; + fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS; + fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + ret = dma_async_device_register(&fdev->dma_device); + if (ret) { + dev_err(&pdev->dev, + "Failed to register DMA device (%d)\n", ret); + goto err_rproc; + } + + ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev); + if (ret) { + dev_err(&pdev->dev, + "Failed to register controller (%d)\n", ret); + goto err_dma_dev; + } + + dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq); + + return 0; + +err_dma_dev: + dma_async_device_unregister(&fdev->dma_device); +err_rproc: + st_slim_rproc_put(fdev->slim_rproc); +err: + return ret; +} + +static int st_fdma_remove(struct platform_device *pdev) +{ + struct st_fdma_dev *fdev = platform_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, fdev->irq, fdev); + st_slim_rproc_put(fdev->slim_rproc); + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&fdev->dma_device); + + return 0; +} + +static struct platform_driver st_fdma_platform_driver = { + .driver = { + .name = "st-fdma", + .of_match_table = st_fdma_match, + }, + .probe = st_fdma_probe, + .remove = st_fdma_remove, +}; +module_platform_driver(st_fdma_platform_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); +MODULE_AUTHOR("Ludovic.barre "); +MODULE_AUTHOR("Peter Griffin "); -- 1.9.1 From mboxrd@z Thu Jan 1 00:00:00 
1970 From: Peter Griffin Subject: [PATCH v8 05/18] dmaengine: st_fdma: Add STMicroelectronics FDMA engine driver support Date: Fri, 26 Aug 2016 15:56:40 +0100 Message-ID: <1472223413-7254-6-git-send-email-peter.griffin@linaro.org> References: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> Mime-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: base64 Return-path: In-Reply-To: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dri-devel-bounces@lists.freedesktop.org Sender: "dri-devel" To: linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org, kernel@stlinux.com, vinod.koul@intel.com, patrice.chotard@st.com, dan.j.williams@intel.com, airlied@linux.ie, kraxel@redhat.com, ohad@wizery.com, bjorn.andersson@linaro.org Cc: devicetree@vger.kernel.org, linux-remoteproc@vger.kernel.org, dri-devel@lists.freedesktop.org, virtualization@lists.linux-foundation.org, peter.griffin@linaro.org, dmaengine@vger.kernel.org, lee.jones@linaro.org, Ludovic Barre List-Id: devicetree@vger.kernel.org VGhpcyBwYXRjaCBhZGRzIHN1cHBvcnQgZm9yIHRoZSBGbGV4aWJsZSBEaXJlY3QgTWVtb3J5IEFj Y2VzcyAoRkRNQSkgY29yZQpkcml2ZXIuIFRoZSBGRE1BIGlzIGEgc2xpbSBjb3JlIENQVSB3aXRo IGEgZGVkaWNhdGVkIGZpcm13YXJlLgpJdCBpcyBhIGdlbmVyYWwgcHVycG9zZSBETUEgY29udHJv bGxlciBjYXBhYmxlIG9mIHN1cHBvcnRpbmcgMTYKaW5kZXBlbmRlbnQgRE1BIGNoYW5uZWxzLiBE YXRhIG1vdmVzIG1heWJlIGZyb20gbWVtb3J5IHRvIG1lbW9yeQpvciBiZXR3ZWVuIG1lbW9yeSBh bmQgcGFjZWQgbGF0ZW5jeSBjcml0aWNhbCByZWFsIHRpbWUgdGFyZ2V0cyBhbmQgaXQKaXMgZm91 bmQgb24gYWwgU1RpIGJhc2VkIGNoaXBzZXRzLgoKU2lnbmVkLW9mZi1ieTogTHVkb3ZpYyBCYXJy ZSA8bHVkb3ZpYy5iYXJyZUBzdC5jb20+ClNpZ25lZC1vZmYtYnk6IFBldGVyIEdyaWZmaW4gPHBl dGVyLmdyaWZmaW5AbGluYXJvLm9yZz4KLS0tCiBkcml2ZXJzL2RtYS9LY29uZmlnICAgfCAgMTQg Ky0KIGRyaXZlcnMvZG1hL01ha2VmaWxlICB8ICAgMSArCiBkcml2ZXJzL2RtYS9zdF9mZG1hLmMg fCA4ODAgKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysK 
IDMgZmlsZXMgY2hhbmdlZCwgODk0IGluc2VydGlvbnMoKyksIDEgZGVsZXRpb24oLSkKIGNyZWF0 ZSBtb2RlIDEwMDY0NCBkcml2ZXJzL2RtYS9zdF9mZG1hLmMKCmRpZmYgLS1naXQgYS9kcml2ZXJz L2RtYS9LY29uZmlnIGIvZHJpdmVycy9kbWEvS2NvbmZpZwppbmRleCA3MzlmNzk3Li41YjVhMzQx IDEwMDY0NAotLS0gYS9kcml2ZXJzL2RtYS9LY29uZmlnCisrKyBiL2RyaXZlcnMvZG1hL0tjb25m aWcKQEAgLTQzNyw2ICs0MzcsMTkgQEAgY29uZmlnIFNURV9ETUE0MAogCWhlbHAKIAkgIFN1cHBv cnQgZm9yIFNULUVyaWNzc29uIERNQTQwIGNvbnRyb2xsZXIKIAorY29uZmlnIFNUX0ZETUEKKwl0 cmlzdGF0ZSAiU1QgRkRNQSBkbWFlbmdpbmUgc3VwcG9ydCIKKwlkZXBlbmRzIG9uIEFSQ0hfU1RJ CisJc2VsZWN0IFNUX1NMSU1fUkVNT1RFUFJPQworCXNlbGVjdCBETUFfRU5HSU5FCisJc2VsZWN0 IERNQV9WSVJUVUFMX0NIQU5ORUxTCisJaGVscAorCSAgRW5hYmxlIHN1cHBvcnQgZm9yIFNUIEZE TUEgY29udHJvbGxlci4KKwkgIEl0IHN1cHBvcnRzIDE2IGluZGVwZW5kZW50IERNQSBjaGFubmVs cywgYWNjZXB0cyB1cCB0byAzMiBETUEgcmVxdWVzdHMKKworCSAgU2F5IFkgaGVyZSBpZiB5b3Ug aGF2ZSBzdWNoIGEgY2hpcHNldC4KKwkgIElmIHVuc3VyZSwgc2F5IE4uCisKIGNvbmZpZyBTVE0z Ml9ETUEKIAlib29sICJTVE1pY3JvZWxlY3Ryb25pY3MgU1RNMzIgRE1BIHN1cHBvcnQiCiAJZGVw ZW5kcyBvbiBBUkNIX1NUTTMyCkBAIC01NjcsNyArNTgwLDYgQEAgY29uZmlnIFpYX0RNQQogCWhl bHAKIAkgIFN1cHBvcnQgdGhlIERNQSBlbmdpbmUgZm9yIFpURSBaWDI5NjcwMiBwbGF0Zm9ybSBk ZXZpY2VzLgogCi0KICMgZHJpdmVyIGZpbGVzCiBzb3VyY2UgImRyaXZlcnMvZG1hL2Jlc3Rjb21t L0tjb25maWciCiAKZGlmZiAtLWdpdCBhL2RyaXZlcnMvZG1hL01ha2VmaWxlIGIvZHJpdmVycy9k bWEvTWFrZWZpbGUKaW5kZXggZTRkYzljYS4uYTRmYTMzNiAxMDA2NDQKLS0tIGEvZHJpdmVycy9k bWEvTWFrZWZpbGUKKysrIGIvZHJpdmVycy9kbWEvTWFrZWZpbGUKQEAgLTY3LDYgKzY3LDcgQEAg b2JqLSQoQ09ORklHX1RJX0RNQV9DUk9TU0JBUikgKz0gdGktZG1hLWNyb3NzYmFyLm8KIG9iai0k KENPTkZJR19USV9FRE1BKSArPSBlZG1hLm8KIG9iai0kKENPTkZJR19YR0VORV9ETUEpICs9IHhn ZW5lLWRtYS5vCiBvYmotJChDT05GSUdfWlhfRE1BKSArPSB6eDI5NjcwMl9kbWEubworb2JqLSQo Q09ORklHX1NUX0ZETUEpICs9IHN0X2ZkbWEubwogCiBvYmoteSArPSBxY29tLwogb2JqLXkgKz0g eGlsaW54LwpkaWZmIC0tZ2l0IGEvZHJpdmVycy9kbWEvc3RfZmRtYS5jIGIvZHJpdmVycy9kbWEv c3RfZmRtYS5jCm5ldyBmaWxlIG1vZGUgMTAwNjQ0CmluZGV4IDAwMDAwMDAuLmJiOGQ4YTcKLS0t 
IC9kZXYvbnVsbAorKysgYi9kcml2ZXJzL2RtYS9zdF9mZG1hLmMKQEAgLTAsMCArMSw4ODAgQEAK Ky8qCisgKiBzdF9mZG1hLmMKKyAqCisgKiBDb3B5cmlnaHQgKEMpIDIwMTQgU1RNaWNyb2VsZWN0 cm9uaWNzCisgKiBBdXRob3I6IEx1ZG92aWMgQmFycmUgPEx1ZG92aWMuYmFycmVAc3QuY29tPgor ICoJICAgUGV0ZXIgR3JpZmZpbiA8cGV0ZXIuZ3JpZmZpbkBsaW5hcm8ub3JnPgorICogTGljZW5z ZSB0ZXJtczogIEdOVSBHZW5lcmFsIFB1YmxpYyBMaWNlbnNlIChHUEwpLCB2ZXJzaW9uIDIKKyAq LworI2luY2x1ZGUgPGxpbnV4L2luaXQuaD4KKyNpbmNsdWRlIDxsaW51eC9tb2R1bGUuaD4KKyNp bmNsdWRlIDxsaW51eC9vZl9kZXZpY2UuaD4KKyNpbmNsdWRlIDxsaW51eC9vZl9kbWEuaD4KKyNp bmNsdWRlIDxsaW51eC9wbGF0Zm9ybV9kZXZpY2UuaD4KKyNpbmNsdWRlIDxsaW51eC9pbnRlcnJ1 cHQuaD4KKyNpbmNsdWRlIDxsaW51eC9yZW1vdGVwcm9jLmg+CisKKyNpbmNsdWRlICJzdF9mZG1h LmgiCisKK3N0YXRpYyBpbmxpbmUgc3RydWN0IHN0X2ZkbWFfY2hhbiAqdG9fc3RfZmRtYV9jaGFu KHN0cnVjdCBkbWFfY2hhbiAqYykKK3sKKwlyZXR1cm4gY29udGFpbmVyX29mKGMsIHN0cnVjdCBz dF9mZG1hX2NoYW4sIHZjaGFuLmNoYW4pOworfQorCitzdGF0aWMgc3RydWN0IHN0X2ZkbWFfZGVz YyAqdG9fc3RfZmRtYV9kZXNjKHN0cnVjdCB2aXJ0X2RtYV9kZXNjICp2ZCkKK3sKKwlyZXR1cm4g Y29udGFpbmVyX29mKHZkLCBzdHJ1Y3Qgc3RfZmRtYV9kZXNjLCB2ZGVzYyk7Cit9CisKK3N0YXRp YyBpbnQgc3RfZmRtYV9kcmVxX2dldChzdHJ1Y3Qgc3RfZmRtYV9jaGFuICpmY2hhbikKK3sKKwlz dHJ1Y3Qgc3RfZmRtYV9kZXYgKmZkZXYgPSBmY2hhbi0+ZmRldjsKKwl1MzIgcmVxX2xpbmVfY2Zn ID0gZmNoYW4tPmNmZy5yZXFfbGluZTsKKwl1MzIgZHJlcV9saW5lOworCWludCB0cnkgPSAwOwor CisJLyoKKwkgKiBkcmVxX21hc2sgaXMgc2hhcmVkIGZvciBuIGNoYW5uZWxzIG9mIGZkbWEsIHNv IGFsbCBhY2Nlc3NlcyBtdXN0IGJlCisJICogYXRvbWljLiBpZiB0aGUgZHJlcV9tYXNrIGlzIGNo YW5nZWQgYmV0d2VlbiBmZnogYW5kIHNldF9iaXQsCisJICogd2UgcmV0cnkKKwkgKi8KKwlkbyB7 CisJCWlmIChmZGV2LT5kcmVxX21hc2sgPT0gfjBMKSB7CisJCQlkZXZfZXJyKGZkZXYtPmRldiwg Ik5vIHJlcSBsaW5lcyBhdmFpbGFibGVcbiIpOworCQkJcmV0dXJuIC1FSU5WQUw7CisJCX0KKwor CQlpZiAodHJ5IHx8IHJlcV9saW5lX2NmZyA+PSBTVF9GRE1BX05SX0RSRVFTKSB7CisJCQlkZXZf ZXJyKGZkZXYtPmRldiwgIkludmFsaWQgb3IgdXNlZCByZXEgbGluZVxuIik7CisJCQlyZXR1cm4g LUVJTlZBTDsKKwkJfSBlbHNlIHsKKwkJCWRyZXFfbGluZSA9IHJlcV9saW5lX2NmZzsKKwkJfQor 
CisJCXRyeSsrOworCX0gd2hpbGUgKHRlc3RfYW5kX3NldF9iaXQoZHJlcV9saW5lLCAmZmRldi0+ ZHJlcV9tYXNrKSk7CisKKwlkZXZfZGJnKGZkZXYtPmRldiwgImdldCBkcmVxX2xpbmU6JWQgbWFz azolI2x4XG4iLAorCQlkcmVxX2xpbmUsIGZkZXYtPmRyZXFfbWFzayk7CisKKwlyZXR1cm4gZHJl cV9saW5lOworfQorCitzdGF0aWMgdm9pZCBzdF9mZG1hX2RyZXFfcHV0KHN0cnVjdCBzdF9mZG1h X2NoYW4gKmZjaGFuKQoreworCXN0cnVjdCBzdF9mZG1hX2RldiAqZmRldiA9IGZjaGFuLT5mZGV2 OworCisJZGV2X2RiZyhmZGV2LT5kZXYsICJwdXQgZHJlcV9saW5lOiUjeFxuIiwgZmNoYW4tPmRy ZXFfbGluZSk7CisJY2xlYXJfYml0KGZjaGFuLT5kcmVxX2xpbmUsICZmZGV2LT5kcmVxX21hc2sp OworfQorCitzdGF0aWMgdm9pZCBzdF9mZG1hX3hmZXJfZGVzYyhzdHJ1Y3Qgc3RfZmRtYV9jaGFu ICpmY2hhbikKK3sKKwlzdHJ1Y3QgdmlydF9kbWFfZGVzYyAqdmRlc2M7CisJdW5zaWduZWQgbG9u ZyBuYnl0ZXMsIGNoX2NtZCwgY21kOworCisJdmRlc2MgPSB2Y2hhbl9uZXh0X2Rlc2MoJmZjaGFu LT52Y2hhbik7CisJaWYgKCF2ZGVzYykKKwkJcmV0dXJuOworCisJZmNoYW4tPmZkZXNjID0gdG9f c3RfZmRtYV9kZXNjKHZkZXNjKTsKKwluYnl0ZXMgPSBmY2hhbi0+ZmRlc2MtPm5vZGVbMF0uZGVz Yy0+bmJ5dGVzOworCWNtZCA9IEZETUFfQ01EX1NUQVJUKGZjaGFuLT52Y2hhbi5jaGFuLmNoYW5f aWQpOworCWNoX2NtZCA9IGZjaGFuLT5mZGVzYy0+bm9kZVswXS5wZGVzYyB8IEZETUFfQ0hfQ01E X1NUQV9TVEFSVDsKKworCS8qIHN0YXJ0IHRoZSBjaGFubmVsIGZvciB0aGUgZGVzY3JpcHRvciAq LworCWZub2RlX3dyaXRlKGZjaGFuLCBuYnl0ZXMsIEZETUFfQ05UTl9PRlNUKTsKKwlmY2hhbl93 cml0ZShmY2hhbiwgY2hfY21kLCBGRE1BX0NIX0NNRF9PRlNUKTsKKwl3cml0ZWwoY21kLAorCQlm Y2hhbi0+ZmRldi0+c2xpbV9ycHJvYy0+cGVyaSArIEZETUFfQ01EX1NFVF9PRlNUKTsKKworCWRl dl9kYmcoZmNoYW4tPmZkZXYtPmRldiwgInN0YXJ0IGNoYW46JWRcbiIsIGZjaGFuLT52Y2hhbi5j aGFuLmNoYW5faWQpOworfQorCitzdGF0aWMgdm9pZCBzdF9mZG1hX2NoX3N0YV91cGRhdGUoc3Ry dWN0IHN0X2ZkbWFfY2hhbiAqZmNoYW4sCisJCQkJICB1bnNpZ25lZCBsb25nIGludF9zdGEpCit7 CisJdW5zaWduZWQgbG9uZyBjaF9zdGEsIGNoX2VycjsKKwlpbnQgY2hfaWQgPSBmY2hhbi0+dmNo YW4uY2hhbi5jaGFuX2lkOworCXN0cnVjdCBzdF9mZG1hX2RldiAqZmRldiA9IGZjaGFuLT5mZGV2 OworCisJY2hfc3RhID0gZmNoYW5fcmVhZChmY2hhbiwgRkRNQV9DSF9DTURfT0ZTVCk7CisJY2hf ZXJyID0gY2hfc3RhICYgRkRNQV9DSF9DTURfRVJSX01BU0s7CisJY2hfc3RhICY9IEZETUFfQ0hf 
Q01EX1NUQV9NQVNLOworCisJaWYgKGludF9zdGEgJiBGRE1BX0lOVF9TVEFfRVJSKSB7CisJCWRl dl93YXJuKGZkZXYtPmRldiwgImNoYW46JWQsIGVycm9yOiVsZFxuIiwgY2hfaWQsIGNoX2Vycik7 CisJCWZjaGFuLT5zdGF0dXMgPSBETUFfRVJST1I7CisJCXJldHVybjsKKwl9CisKKwlzd2l0Y2gg KGNoX3N0YSkgeworCWNhc2UgRkRNQV9DSF9DTURfU1RBX1BBVVNFRDoKKwkJZmNoYW4tPnN0YXR1 cyA9IERNQV9QQVVTRUQ7CisJCWJyZWFrOworCisJY2FzZSBGRE1BX0NIX0NNRF9TVEFfUlVOTklO RzoKKwkJZmNoYW4tPnN0YXR1cyA9IERNQV9JTl9QUk9HUkVTUzsKKwkJYnJlYWs7CisJfQorfQor CitzdGF0aWMgaXJxcmV0dXJuX3Qgc3RfZmRtYV9pcnFfaGFuZGxlcihpbnQgaXJxLCB2b2lkICpk ZXZfaWQpCit7CisJc3RydWN0IHN0X2ZkbWFfZGV2ICpmZGV2ID0gZGV2X2lkOworCWlycXJldHVy bl90IHJldCA9IElSUV9OT05FOworCXN0cnVjdCBzdF9mZG1hX2NoYW4gKmZjaGFuID0gJmZkZXYt PmNoYW5zWzBdOworCXVuc2lnbmVkIGxvbmcgaW50X3N0YSwgY2xyOworCisJaW50X3N0YSA9IGZk bWFfcmVhZChmZGV2LCBGRE1BX0lOVF9TVEFfT0ZTVCk7CisJY2xyID0gaW50X3N0YTsKKworCWZv ciAoOyBpbnRfc3RhICE9IDAgOyBpbnRfc3RhID4+PSAyLCBmY2hhbisrKSB7CisJCWlmICghKGlu dF9zdGEgJiAoRkRNQV9JTlRfU1RBX0NIIHwgRkRNQV9JTlRfU1RBX0VSUikpKQorCQkJY29udGlu dWU7CisKKwkJc3Bpbl9sb2NrKCZmY2hhbi0+dmNoYW4ubG9jayk7CisJCXN0X2ZkbWFfY2hfc3Rh X3VwZGF0ZShmY2hhbiwgaW50X3N0YSk7CisKKwkJaWYgKGZjaGFuLT5mZGVzYykgeworCQkJaWYg KCFmY2hhbi0+ZmRlc2MtPmlzY3ljbGljKSB7CisJCQkJbGlzdF9kZWwoJmZjaGFuLT5mZGVzYy0+ dmRlc2Mubm9kZSk7CisJCQkJdmNoYW5fY29va2llX2NvbXBsZXRlKCZmY2hhbi0+ZmRlc2MtPnZk ZXNjKTsKKwkJCQlmY2hhbi0+ZmRlc2MgPSBOVUxMOworCQkJCWZjaGFuLT5zdGF0dXMgPSBETUFf Q09NUExFVEU7CisJCQl9IGVsc2UgeworCQkJCXZjaGFuX2N5Y2xpY19jYWxsYmFjaygmZmNoYW4t PmZkZXNjLT52ZGVzYyk7CisJCQl9CisKKwkJCS8qIFN0YXJ0IHRoZSBuZXh0IGRlc2NyaXB0b3Ig KGlmIGF2YWlsYWJsZSkgKi8KKwkJCWlmICghZmNoYW4tPmZkZXNjKQorCQkJCXN0X2ZkbWFfeGZl cl9kZXNjKGZjaGFuKTsKKwkJfQorCisJCXNwaW5fdW5sb2NrKCZmY2hhbi0+dmNoYW4ubG9jayk7 CisJCXJldCA9IElSUV9IQU5ETEVEOworCX0KKworCWZkbWFfd3JpdGUoZmRldiwgY2xyLCBGRE1B X0lOVF9DTFJfT0ZTVCk7CisKKwlyZXR1cm4gcmV0OworfQorCitzdGF0aWMgc3RydWN0IGRtYV9j aGFuICpzdF9mZG1hX29mX3hsYXRlKHN0cnVjdCBvZl9waGFuZGxlX2FyZ3MgKmRtYV9zcGVjLAor 
CQkJCQkgc3RydWN0IG9mX2RtYSAqb2ZkbWEpCit7CisJc3RydWN0IHN0X2ZkbWFfZGV2ICpmZGV2 ID0gb2ZkbWEtPm9mX2RtYV9kYXRhOworCXN0cnVjdCBkbWFfY2hhbiAqY2hhbjsKKwlzdHJ1Y3Qg c3RfZmRtYV9jaGFuICpmY2hhbjsKKwlpbnQgcmV0OworCisJaWYgKGRtYV9zcGVjLT5hcmdzX2Nv dW50IDwgMSkKKwkJcmV0dXJuIEVSUl9QVFIoLUVJTlZBTCk7CisKKwlpZiAoZmRldi0+ZG1hX2Rl dmljZS5kZXYtPm9mX25vZGUgIT0gZG1hX3NwZWMtPm5wKQorCQlyZXR1cm4gRVJSX1BUUigtRUlO VkFMKTsKKworCXJldCA9IHJwcm9jX2Jvb3QoZmRldi0+c2xpbV9ycHJvYy0+cnByb2MpOworCWlm IChyZXQgPT0gLUVOT0VOVCkKKwkJcmV0dXJuIEVSUl9QVFIoLUVQUk9CRV9ERUZFUik7CisJZWxz ZSBpZiAocmV0KQorCQlyZXR1cm4gRVJSX1BUUihyZXQpOworCisJY2hhbiA9IGRtYV9nZXRfYW55 X3NsYXZlX2NoYW5uZWwoJmZkZXYtPmRtYV9kZXZpY2UpOworCWlmICghY2hhbikKKwkJZ290byBl cnJfY2hhbjsKKworCWZjaGFuID0gdG9fc3RfZmRtYV9jaGFuKGNoYW4pOworCisJZmNoYW4tPmNm Zy5vZl9ub2RlID0gZG1hX3NwZWMtPm5wOworCWZjaGFuLT5jZmcucmVxX2xpbmUgPSBkbWFfc3Bl Yy0+YXJnc1swXTsKKwlmY2hhbi0+Y2ZnLnJlcV9jdHJsID0gMDsKKwlmY2hhbi0+Y2ZnLnR5cGUg PSBTVF9GRE1BX1RZUEVfRlJFRV9SVU47CisKKwlpZiAoZG1hX3NwZWMtPmFyZ3NfY291bnQgPiAx KQorCQlmY2hhbi0+Y2ZnLnJlcV9jdHJsID0gZG1hX3NwZWMtPmFyZ3NbMV0KKwkJCSYgRkRNQV9S RVFfQ1RSTF9DRkdfTUFTSzsKKworCWlmIChkbWFfc3BlYy0+YXJnc19jb3VudCA+IDIpCisJCWZj aGFuLT5jZmcudHlwZSA9IGRtYV9zcGVjLT5hcmdzWzJdOworCisJaWYgKGZjaGFuLT5jZmcudHlw ZSA9PSBTVF9GRE1BX1RZUEVfRlJFRV9SVU4pIHsKKwkJZmNoYW4tPmRyZXFfbGluZSA9IDA7CisJ fSBlbHNlIHsKKwkJZmNoYW4tPmRyZXFfbGluZSA9IHN0X2ZkbWFfZHJlcV9nZXQoZmNoYW4pOwor CQlpZiAoSVNfRVJSX1ZBTFVFKGZjaGFuLT5kcmVxX2xpbmUpKSB7CisJCQljaGFuID0gRVJSX1BU UihmY2hhbi0+ZHJlcV9saW5lKTsKKwkJCWdvdG8gZXJyX2NoYW47CisJCX0KKwl9CisKKwlkZXZf ZGJnKGZkZXYtPmRldiwgInhsYXRlIHJlcV9saW5lOiVkIHR5cGU6JWQgcmVxX2N0cmw6JSNseFxu IiwKKwkJZmNoYW4tPmNmZy5yZXFfbGluZSwgZmNoYW4tPmNmZy50eXBlLCBmY2hhbi0+Y2ZnLnJl cV9jdHJsKTsKKworCXJldHVybiBjaGFuOworCitlcnJfY2hhbjoKKwlycHJvY19zaHV0ZG93bihm ZGV2LT5zbGltX3Jwcm9jLT5ycHJvYyk7CisJcmV0dXJuIGNoYW47CisKK30KKworc3RhdGljIHZv aWQgc3RfZmRtYV9mcmVlX2Rlc2Moc3RydWN0IHZpcnRfZG1hX2Rlc2MgKnZkZXNjKQoreworCXN0 
cnVjdCBzdF9mZG1hX2Rlc2MgKmZkZXNjOworCWludCBpOworCisJZmRlc2MgPSB0b19zdF9mZG1h X2Rlc2ModmRlc2MpOworCWZvciAoaSA9IDA7IGkgPCBmZGVzYy0+bl9ub2RlczsgaSsrKQorCQlk bWFfcG9vbF9mcmVlKGZkZXNjLT5mY2hhbi0+bm9kZV9wb29sLCBmZGVzYy0+bm9kZVtpXS5kZXNj LAorCQkJICAgICAgZmRlc2MtPm5vZGVbaV0ucGRlc2MpOworCWtmcmVlKGZkZXNjKTsKK30KKwor c3RhdGljIHN0cnVjdCBzdF9mZG1hX2Rlc2MgKnN0X2ZkbWFfYWxsb2NfZGVzYyhzdHJ1Y3Qgc3Rf ZmRtYV9jaGFuICpmY2hhbiwKKwkJCQkJICAgICAgIGludCBzZ19sZW4pCit7CisJc3RydWN0IHN0 X2ZkbWFfZGVzYyAqZmRlc2M7CisJaW50IGk7CisKKwlmZGVzYyA9IGt6YWxsb2Moc2l6ZW9mKCpm ZGVzYykgKworCQkJc2l6ZW9mKHN0cnVjdCBzdF9mZG1hX3N3X25vZGUpICogc2dfbGVuLCBHRlBf Tk9XQUlUKTsKKwlpZiAoIWZkZXNjKQorCQlyZXR1cm4gTlVMTDsKKworCWZkZXNjLT5mY2hhbiA9 IGZjaGFuOworCWZkZXNjLT5uX25vZGVzID0gc2dfbGVuOworCWZvciAoaSA9IDA7IGkgPCBzZ19s ZW47IGkrKykgeworCQlmZGVzYy0+bm9kZVtpXS5kZXNjID0gZG1hX3Bvb2xfYWxsb2MoZmNoYW4t Pm5vZGVfcG9vbCwKKwkJCQlHRlBfTk9XQUlULCAmZmRlc2MtPm5vZGVbaV0ucGRlc2MpOworCQlp ZiAoIWZkZXNjLT5ub2RlW2ldLmRlc2MpCisJCQlnb3RvIGVycjsKKwl9CisJcmV0dXJuIGZkZXNj OworCitlcnI6CisJd2hpbGUgKC0taSA+PSAwKQorCQlkbWFfcG9vbF9mcmVlKGZjaGFuLT5ub2Rl X3Bvb2wsIGZkZXNjLT5ub2RlW2ldLmRlc2MsCisJCQkgICAgICBmZGVzYy0+bm9kZVtpXS5wZGVz Yyk7CisJa2ZyZWUoZmRlc2MpOworCXJldHVybiBOVUxMOworfQorCitzdGF0aWMgaW50IHN0X2Zk bWFfYWxsb2NfY2hhbl9yZXMoc3RydWN0IGRtYV9jaGFuICpjaGFuKQoreworCXN0cnVjdCBzdF9m ZG1hX2NoYW4gKmZjaGFuID0gdG9fc3RfZmRtYV9jaGFuKGNoYW4pOworCisJLyogQ3JlYXRlIHRo ZSBkbWEgcG9vbCBmb3IgZGVzY3JpcHRvciBhbGxvY2F0aW9uICovCisJZmNoYW4tPm5vZGVfcG9v bCA9IGRtYV9wb29sX2NyZWF0ZShkZXZfbmFtZSgmY2hhbi0+ZGV2LT5kZXZpY2UpLAorCQkJCQkg ICAgZmNoYW4tPmZkZXYtPmRldiwKKwkJCQkJICAgIHNpemVvZihzdHJ1Y3Qgc3RfZmRtYV9od19u b2RlKSwKKwkJCQkJICAgIF9fYWxpZ25vZl9fKHN0cnVjdCBzdF9mZG1hX2h3X25vZGUpLAorCQkJ CQkgICAgMCk7CisKKwlpZiAoIWZjaGFuLT5ub2RlX3Bvb2wpIHsKKwkJZGV2X2VycihmY2hhbi0+ ZmRldi0+ZGV2LCAidW5hYmxlIHRvIGFsbG9jYXRlIGRlc2MgcG9vbFxuIik7CisJCXJldHVybiAt RU5PTUVNOworCX0KKworCWRldl9kYmcoZmNoYW4tPmZkZXYtPmRldiwgImFsbG9jIGNoX2lkOiVk 
IHR5cGU6JWRcbiIsCisJCWZjaGFuLT52Y2hhbi5jaGFuLmNoYW5faWQsIGZjaGFuLT5jZmcudHlw ZSk7CisKKwlyZXR1cm4gMDsKK30KKworc3RhdGljIHZvaWQgc3RfZmRtYV9mcmVlX2NoYW5fcmVz KHN0cnVjdCBkbWFfY2hhbiAqY2hhbikKK3sKKwlzdHJ1Y3Qgc3RfZmRtYV9jaGFuICpmY2hhbiA9 IHRvX3N0X2ZkbWFfY2hhbihjaGFuKTsKKwlzdHJ1Y3QgcnByb2MgKnJwcm9jID0gZmNoYW4tPmZk ZXYtPnNsaW1fcnByb2MtPnJwcm9jOworCXVuc2lnbmVkIGxvbmcgZmxhZ3M7CisKKwlMSVNUX0hF QUQoaGVhZCk7CisKKwlkZXZfZGJnKGZjaGFuLT5mZGV2LT5kZXYsICIlczogZnJlZWluZyBjaGFu OiVkXG4iLAorCQlfX2Z1bmNfXywgZmNoYW4tPnZjaGFuLmNoYW4uY2hhbl9pZCk7CisKKwlpZiAo ZmNoYW4tPmNmZy50eXBlICE9IFNUX0ZETUFfVFlQRV9GUkVFX1JVTikKKwkJc3RfZmRtYV9kcmVx X3B1dChmY2hhbik7CisKKwlzcGluX2xvY2tfaXJxc2F2ZSgmZmNoYW4tPnZjaGFuLmxvY2ssIGZs YWdzKTsKKwlmY2hhbi0+ZmRlc2MgPSBOVUxMOworCXNwaW5fdW5sb2NrX2lycXJlc3RvcmUoJmZj aGFuLT52Y2hhbi5sb2NrLCBmbGFncyk7CisKKwlkbWFfcG9vbF9kZXN0cm95KGZjaGFuLT5ub2Rl X3Bvb2wpOworCWZjaGFuLT5ub2RlX3Bvb2wgPSBOVUxMOworCW1lbXNldCgmZmNoYW4tPmNmZywg MCwgc2l6ZW9mKHN0cnVjdCBzdF9mZG1hX2NmZykpOworCisJcnByb2Nfc2h1dGRvd24ocnByb2Mp OworfQorCitzdGF0aWMgc3RydWN0IGRtYV9hc3luY190eF9kZXNjcmlwdG9yICpzdF9mZG1hX3By ZXBfZG1hX21lbWNweSgKKwlzdHJ1Y3QgZG1hX2NoYW4gKmNoYW4sCWRtYV9hZGRyX3QgZHN0LCBk bWFfYWRkcl90IHNyYywKKwlzaXplX3QgbGVuLCB1bnNpZ25lZCBsb25nIGZsYWdzKQoreworCXN0 cnVjdCBzdF9mZG1hX2NoYW4gKmZjaGFuOworCXN0cnVjdCBzdF9mZG1hX2Rlc2MgKmZkZXNjOwor CXN0cnVjdCBzdF9mZG1hX2h3X25vZGUgKmh3X25vZGU7CisKKwlpZiAoIWxlbikKKwkJcmV0dXJu IE5VTEw7CisKKwlmY2hhbiA9IHRvX3N0X2ZkbWFfY2hhbihjaGFuKTsKKworCS8qIFdlIG9ubHkg cmVxdWlyZSBhIHNpbmdsZSBkZXNjcmlwdG9yICovCisJZmRlc2MgPSBzdF9mZG1hX2FsbG9jX2Rl c2MoZmNoYW4sIDEpOworCWlmICghZmRlc2MpIHsKKwkJZGV2X2VycihmY2hhbi0+ZmRldi0+ZGV2 LCAibm8gbWVtb3J5IGZvciBkZXNjXG4iKTsKKwkJcmV0dXJuIE5VTEw7CisJfQorCisJaHdfbm9k ZSA9IGZkZXNjLT5ub2RlWzBdLmRlc2M7CisJaHdfbm9kZS0+bmV4dCA9IDA7CisJaHdfbm9kZS0+ Y29udHJvbCA9IEZETUFfTk9ERV9DVFJMX1JFUV9NQVBfRlJFRV9SVU47CisJaHdfbm9kZS0+Y29u dHJvbCB8PSBGRE1BX05PREVfQ1RSTF9TUkNfSU5DUjsKKwlod19ub2RlLT5jb250cm9sIHw9IEZE 
TUFfTk9ERV9DVFJMX0RTVF9JTkNSOworCWh3X25vZGUtPmNvbnRyb2wgfD0gRkRNQV9OT0RFX0NU UkxfSU5UX0VPTjsKKwlod19ub2RlLT5uYnl0ZXMgPSBsZW47CisJaHdfbm9kZS0+c2FkZHIgPSBz cmM7CisJaHdfbm9kZS0+ZGFkZHIgPSBkc3Q7CisJaHdfbm9kZS0+Z2VuZXJpYy5sZW5ndGggPSBs ZW47CisJaHdfbm9kZS0+Z2VuZXJpYy5zc3RyaWRlID0gMDsKKwlod19ub2RlLT5nZW5lcmljLmRz dHJpZGUgPSAwOworCisJcmV0dXJuIHZjaGFuX3R4X3ByZXAoJmZjaGFuLT52Y2hhbiwgJmZkZXNj LT52ZGVzYywgZmxhZ3MpOworfQorCitzdGF0aWMgaW50IGNvbmZpZ19yZXFjdHJsKHN0cnVjdCBz dF9mZG1hX2NoYW4gKmZjaGFuLAorCQkJICBlbnVtIGRtYV90cmFuc2Zlcl9kaXJlY3Rpb24gZGly ZWN0aW9uKQoreworCXUzMiBtYXhidXJzdCA9IDAsIGFkZHIgPSAwOworCWVudW0gZG1hX3NsYXZl X2J1c3dpZHRoIHdpZHRoOworCWludCBjaF9pZCA9IGZjaGFuLT52Y2hhbi5jaGFuLmNoYW5faWQ7 CisJc3RydWN0IHN0X2ZkbWFfZGV2ICpmZGV2ID0gZmNoYW4tPmZkZXY7CisKKwlzd2l0Y2ggKGRp cmVjdGlvbikgeworCisJY2FzZSBETUFfREVWX1RPX01FTToKKwkJZmNoYW4tPmNmZy5yZXFfY3Ry bCAmPSB+RkRNQV9SRVFfQ1RSTF9XTlI7CisJCW1heGJ1cnN0ID0gZmNoYW4tPnNjZmcuc3JjX21h eGJ1cnN0OworCQl3aWR0aCA9IGZjaGFuLT5zY2ZnLnNyY19hZGRyX3dpZHRoOworCQlhZGRyID0g ZmNoYW4tPnNjZmcuc3JjX2FkZHI7CisJCWJyZWFrOworCisJY2FzZSBETUFfTUVNX1RPX0RFVjoK KwkJZmNoYW4tPmNmZy5yZXFfY3RybCB8PSBGRE1BX1JFUV9DVFJMX1dOUjsKKwkJbWF4YnVyc3Qg PSBmY2hhbi0+c2NmZy5kc3RfbWF4YnVyc3Q7CisJCXdpZHRoID0gZmNoYW4tPnNjZmcuZHN0X2Fk ZHJfd2lkdGg7CisJCWFkZHIgPSBmY2hhbi0+c2NmZy5kc3RfYWRkcjsKKwkJYnJlYWs7CisKKwlk ZWZhdWx0OgorCQlyZXR1cm4gLUVJTlZBTDsKKwl9CisKKwlmY2hhbi0+Y2ZnLnJlcV9jdHJsICY9 IH5GRE1BX1JFUV9DVFJMX09QQ09ERV9NQVNLOworCisJc3dpdGNoICh3aWR0aCkgeworCisJY2Fz ZSBETUFfU0xBVkVfQlVTV0lEVEhfMV9CWVRFOgorCQlmY2hhbi0+Y2ZnLnJlcV9jdHJsIHw9IEZE TUFfUkVRX0NUUkxfT1BDT0RFX0xEX1NUMTsKKwkJYnJlYWs7CisKKwljYXNlIERNQV9TTEFWRV9C VVNXSURUSF8yX0JZVEVTOgorCQlmY2hhbi0+Y2ZnLnJlcV9jdHJsIHw9IEZETUFfUkVRX0NUUkxf T1BDT0RFX0xEX1NUMjsKKwkJYnJlYWs7CisKKwljYXNlIERNQV9TTEFWRV9CVVNXSURUSF80X0JZ VEVTOgorCQlmY2hhbi0+Y2ZnLnJlcV9jdHJsIHw9IEZETUFfUkVRX0NUUkxfT1BDT0RFX0xEX1NU NDsKKwkJYnJlYWs7CisKKwljYXNlIERNQV9TTEFWRV9CVVNXSURUSF84X0JZVEVTOgorCQlmY2hh 
bi0+Y2ZnLnJlcV9jdHJsIHw9IEZETUFfUkVRX0NUUkxfT1BDT0RFX0xEX1NUODsKKwkJYnJlYWs7 CisKKwlkZWZhdWx0OgorCQlyZXR1cm4gLUVJTlZBTDsKKwl9CisKKwlmY2hhbi0+Y2ZnLnJlcV9j dHJsICY9IH5GRE1BX1JFUV9DVFJMX05VTV9PUFNfTUFTSzsKKwlmY2hhbi0+Y2ZnLnJlcV9jdHJs IHw9IEZETUFfUkVRX0NUUkxfTlVNX09QUyhtYXhidXJzdC0xKTsKKwlkcmVxX3dyaXRlKGZjaGFu LCBmY2hhbi0+Y2ZnLnJlcV9jdHJsLCBGRE1BX1JFUV9DVFJMX09GU1QpOworCisJZmNoYW4tPmNm Zy5kZXZfYWRkciA9IGFkZHI7CisJZmNoYW4tPmNmZy5kaXIgPSBkaXJlY3Rpb247CisKKwlkZXZf ZGJnKGZkZXYtPmRldiwgImNoYW46JWQgY29uZmlnX3JlcWN0cmw6JSN4IHJlcV9jdHJsOiUjbHhc biIsCisJCWNoX2lkLCBhZGRyLCBmY2hhbi0+Y2ZnLnJlcV9jdHJsKTsKKworCXJldHVybiAwOwor fQorCitzdGF0aWMgdm9pZCBmaWxsX2h3X25vZGUoc3RydWN0IHN0X2ZkbWFfaHdfbm9kZSAqaHdf bm9kZSwKKwkJCXN0cnVjdCBzdF9mZG1hX2NoYW4gKmZjaGFuLAorCQkJZW51bSBkbWFfdHJhbnNm ZXJfZGlyZWN0aW9uIGRpcmVjdGlvbikKK3sKKwlpZiAoZGlyZWN0aW9uID09IERNQV9NRU1fVE9f REVWKSB7CisJCWh3X25vZGUtPmNvbnRyb2wgfD0gRkRNQV9OT0RFX0NUUkxfU1JDX0lOQ1I7CisJ CWh3X25vZGUtPmNvbnRyb2wgfD0gRkRNQV9OT0RFX0NUUkxfRFNUX1NUQVRJQzsKKwkJaHdfbm9k ZS0+ZGFkZHIgPSBmY2hhbi0+Y2ZnLmRldl9hZGRyOworCX0gZWxzZSB7CisJCWh3X25vZGUtPmNv bnRyb2wgfD0gRkRNQV9OT0RFX0NUUkxfU1JDX1NUQVRJQzsKKwkJaHdfbm9kZS0+Y29udHJvbCB8 PSBGRE1BX05PREVfQ1RSTF9EU1RfSU5DUjsKKwkJaHdfbm9kZS0+c2FkZHIgPSBmY2hhbi0+Y2Zn LmRldl9hZGRyOworCX0KKworCWh3X25vZGUtPmdlbmVyaWMuc3N0cmlkZSA9IDA7CisJaHdfbm9k ZS0+Z2VuZXJpYy5kc3RyaWRlID0gMDsKK30KKworc3RhdGljIGlubGluZSBzdHJ1Y3Qgc3RfZmRt YV9jaGFuICpzdF9mZG1hX3ByZXBfY29tbW9uKHN0cnVjdCBkbWFfY2hhbiAqY2hhbiwKKwkJc2l6 ZV90IGxlbiwgZW51bSBkbWFfdHJhbnNmZXJfZGlyZWN0aW9uIGRpcmVjdGlvbikKK3sKKwlzdHJ1 Y3Qgc3RfZmRtYV9jaGFuICpmY2hhbjsKKworCWlmICghY2hhbiB8fCAhbGVuKQorCQlyZXR1cm4g TlVMTDsKKworCWZjaGFuID0gdG9fc3RfZmRtYV9jaGFuKGNoYW4pOworCisJaWYgKCFpc19zbGF2 ZV9kaXJlY3Rpb24oZGlyZWN0aW9uKSkgeworCQlkZXZfZXJyKGZjaGFuLT5mZGV2LT5kZXYsICJi YWQgZGlyZWN0aW9uP1xuIik7CisJCXJldHVybiBOVUxMOworCX0KKworCXJldHVybiBmY2hhbjsK K30KKworc3RhdGljIHN0cnVjdCBkbWFfYXN5bmNfdHhfZGVzY3JpcHRvciAqc3RfZmRtYV9wcmVw 
X2RtYV9jeWNsaWMoCisJCXN0cnVjdCBkbWFfY2hhbiAqY2hhbiwgZG1hX2FkZHJfdCBidWZfYWRk ciwgc2l6ZV90IGxlbiwKKwkJc2l6ZV90IHBlcmlvZF9sZW4sIGVudW0gZG1hX3RyYW5zZmVyX2Rp cmVjdGlvbiBkaXJlY3Rpb24sCisJCXVuc2lnbmVkIGxvbmcgZmxhZ3MpCit7CisJc3RydWN0IHN0 X2ZkbWFfY2hhbiAqZmNoYW47CisJc3RydWN0IHN0X2ZkbWFfZGVzYyAqZmRlc2M7CisJaW50IHNn X2xlbiwgaTsKKworCWZjaGFuID0gc3RfZmRtYV9wcmVwX2NvbW1vbihjaGFuLCBsZW4sIGRpcmVj dGlvbik7CisJaWYgKCFmY2hhbikKKwkJcmV0dXJuIE5VTEw7CisKKwlpZiAoIXBlcmlvZF9sZW4p CisJCXJldHVybiBOVUxMOworCisJaWYgKGNvbmZpZ19yZXFjdHJsKGZjaGFuLCBkaXJlY3Rpb24p KSB7CisJCWRldl9lcnIoZmNoYW4tPmZkZXYtPmRldiwgImJhZCB3aWR0aCBvciBkaXJlY3Rpb25c biIpOworCQlyZXR1cm4gTlVMTDsKKwl9CisKKwkvKiB0aGUgYnVmZmVyIGxlbmd0aCBtdXN0IGJl IGEgbXVsdGlwbGUgb2YgcGVyaW9kX2xlbiAqLworCWlmIChsZW4gJSBwZXJpb2RfbGVuICE9IDAp IHsKKwkJZGV2X2VycihmY2hhbi0+ZmRldi0+ZGV2LCAibGVuIGlzIG5vdCBtdWx0aXBsZSBvZiBw ZXJpb2RcbiIpOworCQlyZXR1cm4gTlVMTDsKKwl9CisKKwlzZ19sZW4gPSBsZW4gLyBwZXJpb2Rf bGVuOworCWZkZXNjID0gc3RfZmRtYV9hbGxvY19kZXNjKGZjaGFuLCBzZ19sZW4pOworCWlmICgh ZmRlc2MpIHsKKwkJZGV2X2VycihmY2hhbi0+ZmRldi0+ZGV2LCAibm8gbWVtb3J5IGZvciBkZXNj XG4iKTsKKwkJcmV0dXJuIE5VTEw7CisJfQorCisJZmRlc2MtPmlzY3ljbGljID0gdHJ1ZTsKKwor CWZvciAoaSA9IDA7IGkgPCBzZ19sZW47IGkrKykgeworCQlzdHJ1Y3Qgc3RfZmRtYV9od19ub2Rl ICpod19ub2RlID0gZmRlc2MtPm5vZGVbaV0uZGVzYzsKKworCQlod19ub2RlLT5uZXh0ID0gZmRl c2MtPm5vZGVbKGkgKyAxKSAlIHNnX2xlbl0ucGRlc2M7CisKKwkJaHdfbm9kZS0+Y29udHJvbCA9 CisJCQlGRE1BX05PREVfQ1RSTF9SRVFfTUFQX0RSRVEoZmNoYW4tPmRyZXFfbGluZSk7CisJCWh3 X25vZGUtPmNvbnRyb2wgfD0gRkRNQV9OT0RFX0NUUkxfSU5UX0VPTjsKKworCQlmaWxsX2h3X25v ZGUoaHdfbm9kZSwgZmNoYW4sIGRpcmVjdGlvbik7CisKKwkJaWYgKGRpcmVjdGlvbiA9PSBETUFf TUVNX1RPX0RFVikKKwkJCWh3X25vZGUtPnNhZGRyID0gYnVmX2FkZHIgKyAoaSAqIHBlcmlvZF9s ZW4pOworCQllbHNlCisJCQlod19ub2RlLT5kYWRkciA9IGJ1Zl9hZGRyICsgKGkgKiBwZXJpb2Rf bGVuKTsKKworCQlod19ub2RlLT5uYnl0ZXMgPSBwZXJpb2RfbGVuOworCQlod19ub2RlLT5nZW5l cmljLmxlbmd0aCA9IHBlcmlvZF9sZW47CisJfQorCisJcmV0dXJuIHZjaGFuX3R4X3ByZXAoJmZj 
aGFuLT52Y2hhbiwgJmZkZXNjLT52ZGVzYywgZmxhZ3MpOworfQorCitzdGF0aWMgc3RydWN0IGRt YV9hc3luY190eF9kZXNjcmlwdG9yICpzdF9mZG1hX3ByZXBfc2xhdmVfc2coCisJCXN0cnVjdCBk bWFfY2hhbiAqY2hhbiwgc3RydWN0IHNjYXR0ZXJsaXN0ICpzZ2wsCisJCXVuc2lnbmVkIGludCBz Z19sZW4sIGVudW0gZG1hX3RyYW5zZmVyX2RpcmVjdGlvbiBkaXJlY3Rpb24sCisJCXVuc2lnbmVk IGxvbmcgZmxhZ3MsIHZvaWQgKmNvbnRleHQpCit7CisJc3RydWN0IHN0X2ZkbWFfY2hhbiAqZmNo YW47CisJc3RydWN0IHN0X2ZkbWFfZGVzYyAqZmRlc2M7CisJc3RydWN0IHN0X2ZkbWFfaHdfbm9k ZSAqaHdfbm9kZTsKKwlzdHJ1Y3Qgc2NhdHRlcmxpc3QgKnNnOworCWludCBpOworCisJZmNoYW4g PSBzdF9mZG1hX3ByZXBfY29tbW9uKGNoYW4sIHNnX2xlbiwgZGlyZWN0aW9uKTsKKwlpZiAoIWZj aGFuKQorCQlyZXR1cm4gTlVMTDsKKworCWlmICghc2dsKQorCQlyZXR1cm4gTlVMTDsKKworCWZk ZXNjID0gc3RfZmRtYV9hbGxvY19kZXNjKGZjaGFuLCBzZ19sZW4pOworCWlmICghZmRlc2MpIHsK KwkJZGV2X2VycihmY2hhbi0+ZmRldi0+ZGV2LCAibm8gbWVtb3J5IGZvciBkZXNjXG4iKTsKKwkJ cmV0dXJuIE5VTEw7CisJfQorCisJZmRlc2MtPmlzY3ljbGljID0gZmFsc2U7CisKKwlmb3JfZWFj aF9zZyhzZ2wsIHNnLCBzZ19sZW4sIGkpIHsKKwkJaHdfbm9kZSA9IGZkZXNjLT5ub2RlW2ldLmRl c2M7CisKKwkJaHdfbm9kZS0+bmV4dCA9IGZkZXNjLT5ub2RlWyhpICsgMSkgJSBzZ19sZW5dLnBk ZXNjOworCQlod19ub2RlLT5jb250cm9sID0gRkRNQV9OT0RFX0NUUkxfUkVRX01BUF9EUkVRKGZj aGFuLT5kcmVxX2xpbmUpOworCisJCWZpbGxfaHdfbm9kZShod19ub2RlLCBmY2hhbiwgZGlyZWN0 aW9uKTsKKworCQlpZiAoZGlyZWN0aW9uID09IERNQV9NRU1fVE9fREVWKQorCQkJaHdfbm9kZS0+ c2FkZHIgPSBzZ19kbWFfYWRkcmVzcyhzZyk7CisJCWVsc2UKKwkJCWh3X25vZGUtPmRhZGRyID0g c2dfZG1hX2FkZHJlc3Moc2cpOworCisJCWh3X25vZGUtPm5ieXRlcyA9IHNnX2RtYV9sZW4oc2cp OworCQlod19ub2RlLT5nZW5lcmljLmxlbmd0aCA9IHNnX2RtYV9sZW4oc2cpOworCX0KKworCS8q IGludGVycnVwdCBhdCBlbmQgb2YgbGFzdCBub2RlICovCisJaHdfbm9kZS0+Y29udHJvbCB8PSBG RE1BX05PREVfQ1RSTF9JTlRfRU9OOworCisJcmV0dXJuIHZjaGFuX3R4X3ByZXAoJmZjaGFuLT52 Y2hhbiwgJmZkZXNjLT52ZGVzYywgZmxhZ3MpOworfQorCitzdGF0aWMgc2l6ZV90IHN0X2ZkbWFf ZGVzY19yZXNpZHVlKHN0cnVjdCBzdF9mZG1hX2NoYW4gKmZjaGFuLAorCQkJCSAgIHN0cnVjdCB2 aXJ0X2RtYV9kZXNjICp2ZGVzYywKKwkJCQkgICBib29sIGluX3Byb2dyZXNzKQoreworCXN0cnVj 
dCBzdF9mZG1hX2Rlc2MgKmZkZXNjID0gZmNoYW4tPmZkZXNjOworCXNpemVfdCByZXNpZHVlID0g MDsKKwlkbWFfYWRkcl90IGN1cl9hZGRyID0gMDsKKwlpbnQgaTsKKworCWlmIChpbl9wcm9ncmVz cykgeworCQljdXJfYWRkciA9IGZjaGFuX3JlYWQoZmNoYW4sIEZETUFfQ0hfQ01EX09GU1QpOwor CQljdXJfYWRkciAmPSBGRE1BX0NIX0NNRF9EQVRBX01BU0s7CisJfQorCisJZm9yIChpID0gZmNo YW4tPmZkZXNjLT5uX25vZGVzIC0gMSA7IGkgPj0gMDsgaS0tKSB7CisJCWlmIChjdXJfYWRkciA9 PSBmZGVzYy0+bm9kZVtpXS5wZGVzYykgeworCQkJcmVzaWR1ZSArPSBmbm9kZV9yZWFkKGZjaGFu LCBGRE1BX0NOVE5fT0ZTVCk7CisJCQlicmVhazsKKwkJfQorCQlyZXNpZHVlICs9IGZkZXNjLT5u b2RlW2ldLmRlc2MtPm5ieXRlczsKKwl9CisKKwlyZXR1cm4gcmVzaWR1ZTsKK30KKworc3RhdGlj IGVudW0gZG1hX3N0YXR1cyBzdF9mZG1hX3R4X3N0YXR1cyhzdHJ1Y3QgZG1hX2NoYW4gKmNoYW4s CisJCQkJCSBkbWFfY29va2llX3QgY29va2llLAorCQkJCQkgc3RydWN0IGRtYV90eF9zdGF0ZSAq dHhzdGF0ZSkKK3sKKwlzdHJ1Y3Qgc3RfZmRtYV9jaGFuICpmY2hhbiA9IHRvX3N0X2ZkbWFfY2hh bihjaGFuKTsKKwlzdHJ1Y3QgdmlydF9kbWFfZGVzYyAqdmQ7CisJZW51bSBkbWFfc3RhdHVzIHJl dDsKKwl1bnNpZ25lZCBsb25nIGZsYWdzOworCisJcmV0ID0gZG1hX2Nvb2tpZV9zdGF0dXMoY2hh biwgY29va2llLCB0eHN0YXRlKTsKKwlpZiAocmV0ID09IERNQV9DT01QTEVURSB8fCAhdHhzdGF0 ZSkKKwkJcmV0dXJuIHJldDsKKworCXNwaW5fbG9ja19pcnFzYXZlKCZmY2hhbi0+dmNoYW4ubG9j aywgZmxhZ3MpOworCXZkID0gdmNoYW5fZmluZF9kZXNjKCZmY2hhbi0+dmNoYW4sIGNvb2tpZSk7 CisJaWYgKGZjaGFuLT5mZGVzYyAmJiBjb29raWUgPT0gZmNoYW4tPmZkZXNjLT52ZGVzYy50eC5j b29raWUpCisJCXR4c3RhdGUtPnJlc2lkdWUgPSBzdF9mZG1hX2Rlc2NfcmVzaWR1ZShmY2hhbiwg dmQsIHRydWUpOworCWVsc2UgaWYgKHZkKQorCQl0eHN0YXRlLT5yZXNpZHVlID0gc3RfZmRtYV9k ZXNjX3Jlc2lkdWUoZmNoYW4sIHZkLCBmYWxzZSk7CisJZWxzZQorCQl0eHN0YXRlLT5yZXNpZHVl ID0gMDsKKworCXNwaW5fdW5sb2NrX2lycXJlc3RvcmUoJmZjaGFuLT52Y2hhbi5sb2NrLCBmbGFn cyk7CisKKwlyZXR1cm4gcmV0OworfQorCitzdGF0aWMgdm9pZCBzdF9mZG1hX2lzc3VlX3BlbmRp bmcoc3RydWN0IGRtYV9jaGFuICpjaGFuKQoreworCXN0cnVjdCBzdF9mZG1hX2NoYW4gKmZjaGFu ID0gdG9fc3RfZmRtYV9jaGFuKGNoYW4pOworCXVuc2lnbmVkIGxvbmcgZmxhZ3M7CisKKwlzcGlu X2xvY2tfaXJxc2F2ZSgmZmNoYW4tPnZjaGFuLmxvY2ssIGZsYWdzKTsKKworCWlmICh2Y2hhbl9p 
c3N1ZV9wZW5kaW5nKCZmY2hhbi0+dmNoYW4pICYmICFmY2hhbi0+ZmRlc2MpCisJCXN0X2ZkbWFf eGZlcl9kZXNjKGZjaGFuKTsKKworCXNwaW5fdW5sb2NrX2lycXJlc3RvcmUoJmZjaGFuLT52Y2hh bi5sb2NrLCBmbGFncyk7Cit9CisKK3N0YXRpYyBpbnQgc3RfZmRtYV9wYXVzZShzdHJ1Y3QgZG1h X2NoYW4gKmNoYW4pCit7CisJdW5zaWduZWQgbG9uZyBmbGFnczsKKwlMSVNUX0hFQUQoaGVhZCk7 CisJc3RydWN0IHN0X2ZkbWFfY2hhbiAqZmNoYW4gPSB0b19zdF9mZG1hX2NoYW4oY2hhbik7CisJ aW50IGNoX2lkID0gZmNoYW4tPnZjaGFuLmNoYW4uY2hhbl9pZDsKKwl1bnNpZ25lZCBsb25nIGNt ZCA9IEZETUFfQ01EX1BBVVNFKGNoX2lkKTsKKworCWRldl9kYmcoZmNoYW4tPmZkZXYtPmRldiwg InBhdXNlIGNoYW46JWRcbiIsIGNoX2lkKTsKKworCXNwaW5fbG9ja19pcnFzYXZlKCZmY2hhbi0+ dmNoYW4ubG9jaywgZmxhZ3MpOworCWlmIChmY2hhbi0+ZmRlc2MpCisJCWZkbWFfd3JpdGUoZmNo YW4tPmZkZXYsIGNtZCwgRkRNQV9DTURfU0VUX09GU1QpOworCXNwaW5fdW5sb2NrX2lycXJlc3Rv cmUoJmZjaGFuLT52Y2hhbi5sb2NrLCBmbGFncyk7CisKKwlyZXR1cm4gMDsKK30KKworc3RhdGlj IGludCBzdF9mZG1hX3Jlc3VtZShzdHJ1Y3QgZG1hX2NoYW4gKmNoYW4pCit7CisJdW5zaWduZWQg bG9uZyBmbGFnczsKKwl1bnNpZ25lZCBsb25nIHZhbDsKKwlzdHJ1Y3Qgc3RfZmRtYV9jaGFuICpm Y2hhbiA9IHRvX3N0X2ZkbWFfY2hhbihjaGFuKTsKKwlpbnQgY2hfaWQgPSBmY2hhbi0+dmNoYW4u Y2hhbi5jaGFuX2lkOworCisJZGV2X2RiZyhmY2hhbi0+ZmRldi0+ZGV2LCAicmVzdW1lIGNoYW46 JWRcbiIsIGNoX2lkKTsKKworCXNwaW5fbG9ja19pcnFzYXZlKCZmY2hhbi0+dmNoYW4ubG9jaywg ZmxhZ3MpOworCWlmIChmY2hhbi0+ZmRlc2MpIHsKKwkJdmFsID0gZmNoYW5fcmVhZChmY2hhbiwg RkRNQV9DSF9DTURfT0ZTVCk7CisJCXZhbCAmPSBGRE1BX0NIX0NNRF9EQVRBX01BU0s7CisJCWZj aGFuX3dyaXRlKGZjaGFuLCB2YWwsIEZETUFfQ0hfQ01EX09GU1QpOworCX0KKwlzcGluX3VubG9j a19pcnFyZXN0b3JlKCZmY2hhbi0+dmNoYW4ubG9jaywgZmxhZ3MpOworCisJcmV0dXJuIDA7Cit9 CisKK3N0YXRpYyBpbnQgc3RfZmRtYV90ZXJtaW5hdGVfYWxsKHN0cnVjdCBkbWFfY2hhbiAqY2hh bikKK3sKKwl1bnNpZ25lZCBsb25nIGZsYWdzOworCUxJU1RfSEVBRChoZWFkKTsKKwlzdHJ1Y3Qg c3RfZmRtYV9jaGFuICpmY2hhbiA9IHRvX3N0X2ZkbWFfY2hhbihjaGFuKTsKKwlpbnQgY2hfaWQg PSBmY2hhbi0+dmNoYW4uY2hhbi5jaGFuX2lkOworCXVuc2lnbmVkIGxvbmcgY21kID0gRkRNQV9D TURfUEFVU0UoY2hfaWQpOworCisJZGV2X2RiZyhmY2hhbi0+ZmRldi0+ZGV2LCAidGVybWluYXRl 
IGNoYW46JWRcbiIsIGNoX2lkKTsKKworCXNwaW5fbG9ja19pcnFzYXZlKCZmY2hhbi0+dmNoYW4u bG9jaywgZmxhZ3MpOworCWZkbWFfd3JpdGUoZmNoYW4tPmZkZXYsIGNtZCwgRkRNQV9DTURfU0VU X09GU1QpOworCWZjaGFuLT5mZGVzYyA9IE5VTEw7CisJdmNoYW5fZ2V0X2FsbF9kZXNjcmlwdG9y cygmZmNoYW4tPnZjaGFuLCAmaGVhZCk7CisJc3Bpbl91bmxvY2tfaXJxcmVzdG9yZSgmZmNoYW4t PnZjaGFuLmxvY2ssIGZsYWdzKTsKKwl2Y2hhbl9kbWFfZGVzY19mcmVlX2xpc3QoJmZjaGFuLT52 Y2hhbiwgJmhlYWQpOworCisJcmV0dXJuIDA7Cit9CisKK3N0YXRpYyBpbnQgc3RfZmRtYV9zbGF2 ZV9jb25maWcoc3RydWN0IGRtYV9jaGFuICpjaGFuLAorCQkJCXN0cnVjdCBkbWFfc2xhdmVfY29u ZmlnICpzbGF2ZV9jZmcpCit7CisJc3RydWN0IHN0X2ZkbWFfY2hhbiAqZmNoYW4gPSB0b19zdF9m ZG1hX2NoYW4oY2hhbik7CisKKwltZW1jcHkoJmZjaGFuLT5zY2ZnLCBzbGF2ZV9jZmcsIHNpemVv ZihmY2hhbi0+c2NmZykpOworCXJldHVybiAwOworfQorCitzdGF0aWMgY29uc3Qgc3RydWN0IHN0 X2ZkbWFfZHJpdmVyZGF0YSBmZG1hX21wZTMxX3N0aWg0MDdfMTEgPSB7CisJLm5hbWUgPSAiU1Rp SDQwNyIsCisJLmlkID0gMCwKK307CisKK3N0YXRpYyBjb25zdCBzdHJ1Y3Qgc3RfZmRtYV9kcml2 ZXJkYXRhIGZkbWFfbXBlMzFfc3RpaDQwN18xMiA9IHsKKwkubmFtZSA9ICJTVGlINDA3IiwKKwku aWQgPSAxLAorfTsKKworc3RhdGljIGNvbnN0IHN0cnVjdCBzdF9mZG1hX2RyaXZlcmRhdGEgZmRt YV9tcGUzMV9zdGloNDA3XzEzID0geworCS5uYW1lID0gIlNUaUg0MDciLAorCS5pZCA9IDIsCit9 OworCitzdGF0aWMgY29uc3Qgc3RydWN0IG9mX2RldmljZV9pZCBzdF9mZG1hX21hdGNoW10gPSB7 CisJeyAuY29tcGF0aWJsZSA9ICJzdCxzdGloNDA3LWZkbWEtbXBlMzEtMTEiCisJICAsIC5kYXRh ID0gJmZkbWFfbXBlMzFfc3RpaDQwN18xMSB9LAorCXsgLmNvbXBhdGlibGUgPSAic3Qsc3RpaDQw Ny1mZG1hLW1wZTMxLTEyIgorCSAgLCAuZGF0YSA9ICZmZG1hX21wZTMxX3N0aWg0MDdfMTIgfSwK Kwl7IC5jb21wYXRpYmxlID0gInN0LHN0aWg0MDctZmRtYS1tcGUzMS0xMyIKKwkgICwgLmRhdGEg PSAmZmRtYV9tcGUzMV9zdGloNDA3XzEzIH0sCisJe30sCit9OworTU9EVUxFX0RFVklDRV9UQUJM RShvZiwgc3RfZmRtYV9tYXRjaCk7CisKK3N0YXRpYyBpbnQgc3RfZmRtYV9wYXJzZV9kdChzdHJ1 Y3QgcGxhdGZvcm1fZGV2aWNlICpwZGV2LAorCQkJY29uc3Qgc3RydWN0IHN0X2ZkbWFfZHJpdmVy ZGF0YSAqZHJ2ZGF0YSwKKwkJCXN0cnVjdCBzdF9mZG1hX2RldiAqZmRldikKK3sKKwlzdHJ1Y3Qg ZGV2aWNlX25vZGUgKm5wID0gcGRldi0+ZGV2Lm9mX25vZGU7CisJaW50IHJldDsKKworCWlmICgh 
bnApCisJCWdvdG8gZXJyOworCisJcmV0ID0gb2ZfcHJvcGVydHlfcmVhZF91MzIobnAsICJkbWEt Y2hhbm5lbHMiLCAmZmRldi0+bnJfY2hhbm5lbHMpOworCWlmIChyZXQpCisJCWdvdG8gZXJyOwor CisJc25wcmludGYoZmRldi0+ZndfbmFtZSwgRldfTkFNRV9TSVpFLCAiZmRtYV8lc18lZC5lbGYi LAorCQlkcnZkYXRhLT5uYW1lLCBkcnZkYXRhLT5pZCk7CisKK2VycjoKKwlyZXR1cm4gcmV0Owor fQorI2RlZmluZSBGRE1BX0RNQV9CVVNXSURUSFMJKEJJVChETUFfU0xBVkVfQlVTV0lEVEhfMV9C WVRFKSB8IFwKKwkJCQkgQklUKERNQV9TTEFWRV9CVVNXSURUSF8yX0JZVEVTKSB8IFwKKwkJCQkg QklUKERNQV9TTEFWRV9CVVNXSURUSF8zX0JZVEVTKSB8IFwKKwkJCQkgQklUKERNQV9TTEFWRV9C VVNXSURUSF80X0JZVEVTKSkKKworc3RhdGljIGludCBzdF9mZG1hX3Byb2JlKHN0cnVjdCBwbGF0 Zm9ybV9kZXZpY2UgKnBkZXYpCit7CisJc3RydWN0IHN0X2ZkbWFfZGV2ICpmZGV2OworCWNvbnN0 IHN0cnVjdCBvZl9kZXZpY2VfaWQgKm1hdGNoOworCXN0cnVjdCBkZXZpY2Vfbm9kZSAqbnAgPSBw ZGV2LT5kZXYub2Zfbm9kZTsKKwljb25zdCBzdHJ1Y3Qgc3RfZmRtYV9kcml2ZXJkYXRhICpkcnZk YXRhOworCWludCByZXQsIGk7CisKKwltYXRjaCA9IG9mX21hdGNoX2RldmljZSgoc3RfZmRtYV9t YXRjaCksICZwZGV2LT5kZXYpOworCWlmICghbWF0Y2ggfHwgIW1hdGNoLT5kYXRhKSB7CisJCWRl dl9lcnIoJnBkZXYtPmRldiwgIk5vIGRldmljZSBtYXRjaCBmb3VuZFxuIik7CisJCXJldHVybiAt RU5PREVWOworCX0KKworCWRydmRhdGEgPSBtYXRjaC0+ZGF0YTsKKworCWZkZXYgPSBkZXZtX2t6 YWxsb2MoJnBkZXYtPmRldiwgc2l6ZW9mKCpmZGV2KSwgR0ZQX0tFUk5FTCk7CisJaWYgKCFmZGV2 KQorCQlyZXR1cm4gLUVOT01FTTsKKworCXJldCA9IHN0X2ZkbWFfcGFyc2VfZHQocGRldiwgZHJ2 ZGF0YSwgZmRldik7CisJaWYgKHJldCkgeworCQlkZXZfZXJyKCZwZGV2LT5kZXYsICJ1bmFibGUg dG8gZmluZCBwbGF0Zm9ybSBkYXRhXG4iKTsKKwkJZ290byBlcnI7CisJfQorCisJZmRldi0+Y2hh bnMgPSBkZXZtX2t6YWxsb2MoJnBkZXYtPmRldiwKKwkJCQkgICBmZGV2LT5ucl9jaGFubmVscwor CQkJCSAgICogc2l6ZW9mKHN0cnVjdCBzdF9mZG1hX2NoYW4pLCBHRlBfS0VSTkVMKTsKKwlpZiAo IWZkZXYtPmNoYW5zKQorCQlyZXR1cm4gLUVOT01FTTsKKworCWZkZXYtPmRldiA9ICZwZGV2LT5k ZXY7CisJZmRldi0+ZHJ2ZGF0YSA9IGRydmRhdGE7CisJcGxhdGZvcm1fc2V0X2RydmRhdGEocGRl diwgZmRldik7CisKKwlmZGV2LT5pcnEgPSBwbGF0Zm9ybV9nZXRfaXJxKHBkZXYsIDApOworCWlm IChmZGV2LT5pcnEgPCAwKSB7CisJCWRldl9lcnIoJnBkZXYtPmRldiwgIkZhaWxlZCB0byBnZXQg 
aXJxIHJlc291cmNlXG4iKTsKKwkJcmV0dXJuIC1FSU5WQUw7CisJfQorCisJcmV0ID0gZGV2bV9y ZXF1ZXN0X2lycSgmcGRldi0+ZGV2LCBmZGV2LT5pcnEsIHN0X2ZkbWFfaXJxX2hhbmRsZXIsIDAs CisJCQkgICAgICAgZGV2X25hbWUoJnBkZXYtPmRldiksIGZkZXYpOworCWlmIChyZXQpIHsKKwkJ ZGV2X2VycigmcGRldi0+ZGV2LCAiRmFpbGVkIHRvIHJlcXVlc3QgaXJxICglZClcbiIsIHJldCk7 CisJCWdvdG8gZXJyOworCX0KKworCWZkZXYtPnNsaW1fcnByb2MgPSBzdF9zbGltX3Jwcm9jX2Fs bG9jKHBkZXYsIGZkZXYtPmZ3X25hbWUpOworCWlmICghZmRldi0+c2xpbV9ycHJvYykgeworCQly ZXQgPSBQVFJfRVJSKGZkZXYtPnNsaW1fcnByb2MpOworCQlkZXZfZXJyKCZwZGV2LT5kZXYsICJz bGltX3Jwcm9jX2FsbG9jIGZhaWxlZCAoJWQpXG4iLCByZXQpOworCQlnb3RvIGVycjsKKwl9CisK KwkvKiBJbml0aWFsaXNlIGxpc3Qgb2YgRkRNQSBjaGFubmVscyAqLworCUlOSVRfTElTVF9IRUFE KCZmZGV2LT5kbWFfZGV2aWNlLmNoYW5uZWxzKTsKKwlmb3IgKGkgPSAwOyBpIDwgZmRldi0+bnJf Y2hhbm5lbHM7IGkrKykgeworCQlzdHJ1Y3Qgc3RfZmRtYV9jaGFuICpmY2hhbiA9ICZmZGV2LT5j aGFuc1tpXTsKKworCQlmY2hhbi0+ZmRldiA9IGZkZXY7CisJCWZjaGFuLT52Y2hhbi5kZXNjX2Zy ZWUgPSBzdF9mZG1hX2ZyZWVfZGVzYzsKKwkJdmNoYW5faW5pdCgmZmNoYW4tPnZjaGFuLCAmZmRl di0+ZG1hX2RldmljZSk7CisJfQorCisJLyogSW5pdGlhbGlzZSB0aGUgRkRNQSBkcmVxIChyZXNl cnZlIDAgJiAzMSBmb3IgRkRNQSB1c2UpICovCisJZmRldi0+ZHJlcV9tYXNrID0gQklUKDApIHwg QklUKDMxKTsKKworCWRtYV9jYXBfc2V0KERNQV9TTEFWRSwgZmRldi0+ZG1hX2RldmljZS5jYXBf bWFzayk7CisJZG1hX2NhcF9zZXQoRE1BX0NZQ0xJQywgZmRldi0+ZG1hX2RldmljZS5jYXBfbWFz ayk7CisJZG1hX2NhcF9zZXQoRE1BX01FTUNQWSwgZmRldi0+ZG1hX2RldmljZS5jYXBfbWFzayk7 CisKKwlmZGV2LT5kbWFfZGV2aWNlLmRldiA9ICZwZGV2LT5kZXY7CisJZmRldi0+ZG1hX2Rldmlj ZS5kZXZpY2VfYWxsb2NfY2hhbl9yZXNvdXJjZXMgPSBzdF9mZG1hX2FsbG9jX2NoYW5fcmVzOwor CWZkZXYtPmRtYV9kZXZpY2UuZGV2aWNlX2ZyZWVfY2hhbl9yZXNvdXJjZXMgPSBzdF9mZG1hX2Zy ZWVfY2hhbl9yZXM7CisJZmRldi0+ZG1hX2RldmljZS5kZXZpY2VfcHJlcF9kbWFfY3ljbGljCT0g c3RfZmRtYV9wcmVwX2RtYV9jeWNsaWM7CisJZmRldi0+ZG1hX2RldmljZS5kZXZpY2VfcHJlcF9z bGF2ZV9zZyA9IHN0X2ZkbWFfcHJlcF9zbGF2ZV9zZzsKKwlmZGV2LT5kbWFfZGV2aWNlLmRldmlj ZV9wcmVwX2RtYV9tZW1jcHkgPSBzdF9mZG1hX3ByZXBfZG1hX21lbWNweTsKKwlmZGV2LT5kbWFf 
ZGV2aWNlLmRldmljZV90eF9zdGF0dXMgPSBzdF9mZG1hX3R4X3N0YXR1czsKKwlmZGV2LT5kbWFf ZGV2aWNlLmRldmljZV9pc3N1ZV9wZW5kaW5nID0gc3RfZmRtYV9pc3N1ZV9wZW5kaW5nOworCWZk ZXYtPmRtYV9kZXZpY2UuZGV2aWNlX3Rlcm1pbmF0ZV9hbGwgPSBzdF9mZG1hX3Rlcm1pbmF0ZV9h bGw7CisJZmRldi0+ZG1hX2RldmljZS5kZXZpY2VfY29uZmlnID0gc3RfZmRtYV9zbGF2ZV9jb25m aWc7CisJZmRldi0+ZG1hX2RldmljZS5kZXZpY2VfcGF1c2UgPSBzdF9mZG1hX3BhdXNlOworCWZk ZXYtPmRtYV9kZXZpY2UuZGV2aWNlX3Jlc3VtZSA9IHN0X2ZkbWFfcmVzdW1lOworCisJZmRldi0+ ZG1hX2RldmljZS5zcmNfYWRkcl93aWR0aHMgPSBGRE1BX0RNQV9CVVNXSURUSFM7CisJZmRldi0+ ZG1hX2RldmljZS5kc3RfYWRkcl93aWR0aHMgPSBGRE1BX0RNQV9CVVNXSURUSFM7CisJZmRldi0+ ZG1hX2RldmljZS5kaXJlY3Rpb25zID0gQklUKERNQV9ERVZfVE9fTUVNKSB8IEJJVChETUFfTUVN X1RPX0RFVik7CisJZmRldi0+ZG1hX2RldmljZS5yZXNpZHVlX2dyYW51bGFyaXR5ID0gRE1BX1JF U0lEVUVfR1JBTlVMQVJJVFlfQlVSU1Q7CisKKwlyZXQgPSBkbWFfYXN5bmNfZGV2aWNlX3JlZ2lz dGVyKCZmZGV2LT5kbWFfZGV2aWNlKTsKKwlpZiAocmV0KSB7CisJCWRldl9lcnIoJnBkZXYtPmRl diwKKwkJCSJGYWlsZWQgdG8gcmVnaXN0ZXIgRE1BIGRldmljZSAoJWQpXG4iLCByZXQpOworCQln b3RvIGVycl9ycHJvYzsKKwl9CisKKwlyZXQgPSBvZl9kbWFfY29udHJvbGxlcl9yZWdpc3Rlcihu cCwgc3RfZmRtYV9vZl94bGF0ZSwgZmRldik7CisJaWYgKHJldCkgeworCQlkZXZfZXJyKCZwZGV2 LT5kZXYsCisJCQkiRmFpbGVkIHRvIHJlZ2lzdGVyIGNvbnRyb2xsZXIgKCVkKVxuIiwgcmV0KTsK KwkJZ290byBlcnJfZG1hX2RldjsKKwl9CisKKwlkZXZfaW5mbygmcGRldi0+ZGV2LCAiU1QgRkRN QSBlbmdpbmUgZHJpdmVyLCBpcnE6JWRcbiIsIGZkZXYtPmlycSk7CisKKwlyZXR1cm4gMDsKKwor ZXJyX2RtYV9kZXY6CisJZG1hX2FzeW5jX2RldmljZV91bnJlZ2lzdGVyKCZmZGV2LT5kbWFfZGV2 aWNlKTsKK2Vycl9ycHJvYzoKKwlzdF9zbGltX3Jwcm9jX3B1dChmZGV2LT5zbGltX3Jwcm9jKTsK K2VycjoKKwlyZXR1cm4gcmV0OworfQorCitzdGF0aWMgaW50IHN0X2ZkbWFfcmVtb3ZlKHN0cnVj dCBwbGF0Zm9ybV9kZXZpY2UgKnBkZXYpCit7CisJc3RydWN0IHN0X2ZkbWFfZGV2ICpmZGV2ID0g cGxhdGZvcm1fZ2V0X2RydmRhdGEocGRldik7CisKKwlkZXZtX2ZyZWVfaXJxKCZwZGV2LT5kZXYs IGZkZXYtPmlycSwgZmRldik7CisJc3Rfc2xpbV9ycHJvY19wdXQoZmRldi0+c2xpbV9ycHJvYyk7 CisJb2ZfZG1hX2NvbnRyb2xsZXJfZnJlZShwZGV2LT5kZXYub2Zfbm9kZSk7CisJZG1hX2FzeW5j 
X2RldmljZV91bnJlZ2lzdGVyKCZmZGV2LT5kbWFfZGV2aWNlKTsKKworCXJldHVybiAwOworfQor CitzdGF0aWMgc3RydWN0IHBsYXRmb3JtX2RyaXZlciBzdF9mZG1hX3BsYXRmb3JtX2RyaXZlciA9 IHsKKwkuZHJpdmVyID0geworCQkubmFtZSA9ICJzdC1mZG1hIiwKKwkJLm9mX21hdGNoX3RhYmxl ID0gc3RfZmRtYV9tYXRjaCwKKwl9LAorCS5wcm9iZSA9IHN0X2ZkbWFfcHJvYmUsCisJLnJlbW92 ZSA9IHN0X2ZkbWFfcmVtb3ZlLAorfTsKK21vZHVsZV9wbGF0Zm9ybV9kcml2ZXIoc3RfZmRtYV9w bGF0Zm9ybV9kcml2ZXIpOworCitNT0RVTEVfTElDRU5TRSgiR1BMIHYyIik7CitNT0RVTEVfREVT Q1JJUFRJT04oIlNUTWljcm9lbGVjdHJvbmljcyBGRE1BIGVuZ2luZSBkcml2ZXIiKTsKK01PRFVM RV9BVVRIT1IoIkx1ZG92aWMuYmFycmUgPEx1ZG92aWMuYmFycmVAc3QuY29tPiIpOworTU9EVUxF X0FVVEhPUigiUGV0ZXIgR3JpZmZpbiA8cGV0ZXIuZ3JpZmZpbkBsaW5hcm8ub3JnPiIpOwotLSAK MS45LjEKCl9fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fCmRy aS1kZXZlbCBtYWlsaW5nIGxpc3QKZHJpLWRldmVsQGxpc3RzLmZyZWVkZXNrdG9wLm9yZwpodHRw czovL2xpc3RzLmZyZWVkZXNrdG9wLm9yZy9tYWlsbWFuL2xpc3RpbmZvL2RyaS1kZXZlbAo= From mboxrd@z Thu Jan 1 00:00:00 1970 From: peter.griffin@linaro.org (Peter Griffin) Date: Fri, 26 Aug 2016 15:56:40 +0100 Subject: [PATCH v8 05/18] dmaengine: st_fdma: Add STMicroelectronics FDMA engine driver support In-Reply-To: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> References: <1472223413-7254-1-git-send-email-peter.griffin@linaro.org> Message-ID: <1472223413-7254-6-git-send-email-peter.griffin@linaro.org> To: linux-arm-kernel@lists.infradead.org List-Id: linux-arm-kernel.lists.infradead.org This patch adds support for the Flexible Direct Memory Access (FDMA) core driver. The FDMA is a slim core CPU with a dedicated firmware. It is a general purpose DMA controller capable of supporting 16 independent DMA channels. Data moves maybe from memory to memory or between memory and paced latency critical real time targets and it is found on al STi based chipsets. 
Signed-off-by: Ludovic Barre Signed-off-by: Peter Griffin --- drivers/dma/Kconfig | 14 +- drivers/dma/Makefile | 1 + drivers/dma/st_fdma.c | 880 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 894 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/st_fdma.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 739f797..5b5a341 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -437,6 +437,19 @@ config STE_DMA40 help Support for ST-Ericsson DMA40 controller +config ST_FDMA + tristate "ST FDMA dmaengine support" + depends on ARCH_STI + select ST_SLIM_REMOTEPROC + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for ST FDMA controller. + It supports 16 independent DMA channels, accepts up to 32 DMA requests + + Say Y here if you have such a chipset. + If unsure, say N. + config STM32_DMA bool "STMicroelectronics STM32 DMA support" depends on ARCH_STM32 @@ -567,7 +580,6 @@ config ZX_DMA help Support the DMA engine for ZTE ZX296702 platform devices. 
- # driver files source "drivers/dma/bestcomm/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index e4dc9ca..a4fa336 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o obj-$(CONFIG_ZX_DMA) += zx296702_dma.o +obj-$(CONFIG_ST_FDMA) += st_fdma.o obj-y += qcom/ obj-y += xilinx/ diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c new file mode 100644 index 0000000..bb8d8a7 --- /dev/null +++ b/drivers/dma/st_fdma.c @@ -0,0 +1,880 @@ +/* + * st_fdma.c + * + * Copyright (C) 2014 STMicroelectronics + * Author: Ludovic Barre + * Peter Griffin + * License terms: GNU General Public License (GPL), version 2 + */ +#include +#include +#include +#include +#include +#include +#include + +#include "st_fdma.h" + +static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c) +{ + return container_of(c, struct st_fdma_chan, vchan.chan); +} + +static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct st_fdma_desc, vdesc); +} + +static int st_fdma_dreq_get(struct st_fdma_chan *fchan) +{ + struct st_fdma_dev *fdev = fchan->fdev; + u32 req_line_cfg = fchan->cfg.req_line; + u32 dreq_line; + int try = 0; + + /* + * dreq_mask is shared for n channels of fdma, so all accesses must be + * atomic. 
if the dreq_mask is changed between ffz and set_bit, + * we retry + */ + do { + if (fdev->dreq_mask == ~0L) { + dev_err(fdev->dev, "No req lines available\n"); + return -EINVAL; + } + + if (try || req_line_cfg >= ST_FDMA_NR_DREQS) { + dev_err(fdev->dev, "Invalid or used req line\n"); + return -EINVAL; + } else { + dreq_line = req_line_cfg; + } + + try++; + } while (test_and_set_bit(dreq_line, &fdev->dreq_mask)); + + dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n", + dreq_line, fdev->dreq_mask); + + return dreq_line; +} + +static void st_fdma_dreq_put(struct st_fdma_chan *fchan) +{ + struct st_fdma_dev *fdev = fchan->fdev; + + dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line); + clear_bit(fchan->dreq_line, &fdev->dreq_mask); +} + +static void st_fdma_xfer_desc(struct st_fdma_chan *fchan) +{ + struct virt_dma_desc *vdesc; + unsigned long nbytes, ch_cmd, cmd; + + vdesc = vchan_next_desc(&fchan->vchan); + if (!vdesc) + return; + + fchan->fdesc = to_st_fdma_desc(vdesc); + nbytes = fchan->fdesc->node[0].desc->nbytes; + cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id); + ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START; + + /* start the channel for the descriptor */ + fnode_write(fchan, nbytes, FDMA_CNTN_OFST); + fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST); + writel(cmd, + fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST); + + dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id); +} + +static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan, + unsigned long int_sta) +{ + unsigned long ch_sta, ch_err; + int ch_id = fchan->vchan.chan.chan_id; + struct st_fdma_dev *fdev = fchan->fdev; + + ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST); + ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK; + ch_sta &= FDMA_CH_CMD_STA_MASK; + + if (int_sta & FDMA_INT_STA_ERR) { + dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err); + fchan->status = DMA_ERROR; + return; + } + + switch (ch_sta) { + case FDMA_CH_CMD_STA_PAUSED: + fchan->status = 
DMA_PAUSED; + break; + + case FDMA_CH_CMD_STA_RUNNING: + fchan->status = DMA_IN_PROGRESS; + break; + } +} + +static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id) +{ + struct st_fdma_dev *fdev = dev_id; + irqreturn_t ret = IRQ_NONE; + struct st_fdma_chan *fchan = &fdev->chans[0]; + unsigned long int_sta, clr; + + int_sta = fdma_read(fdev, FDMA_INT_STA_OFST); + clr = int_sta; + + for (; int_sta != 0 ; int_sta >>= 2, fchan++) { + if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR))) + continue; + + spin_lock(&fchan->vchan.lock); + st_fdma_ch_sta_update(fchan, int_sta); + + if (fchan->fdesc) { + if (!fchan->fdesc->iscyclic) { + list_del(&fchan->fdesc->vdesc.node); + vchan_cookie_complete(&fchan->fdesc->vdesc); + fchan->fdesc = NULL; + fchan->status = DMA_COMPLETE; + } else { + vchan_cyclic_callback(&fchan->fdesc->vdesc); + } + + /* Start the next descriptor (if available) */ + if (!fchan->fdesc) + st_fdma_xfer_desc(fchan); + } + + spin_unlock(&fchan->vchan.lock); + ret = IRQ_HANDLED; + } + + fdma_write(fdev, clr, FDMA_INT_CLR_OFST); + + return ret; +} + +static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct st_fdma_dev *fdev = ofdma->of_dma_data; + struct dma_chan *chan; + struct st_fdma_chan *fchan; + int ret; + + if (dma_spec->args_count < 1) + return ERR_PTR(-EINVAL); + + if (fdev->dma_device.dev->of_node != dma_spec->np) + return ERR_PTR(-EINVAL); + + ret = rproc_boot(fdev->slim_rproc->rproc); + if (ret == -ENOENT) + return ERR_PTR(-EPROBE_DEFER); + else if (ret) + return ERR_PTR(ret); + + chan = dma_get_any_slave_channel(&fdev->dma_device); + if (!chan) + goto err_chan; + + fchan = to_st_fdma_chan(chan); + + fchan->cfg.of_node = dma_spec->np; + fchan->cfg.req_line = dma_spec->args[0]; + fchan->cfg.req_ctrl = 0; + fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN; + + if (dma_spec->args_count > 1) + fchan->cfg.req_ctrl = dma_spec->args[1] + & FDMA_REQ_CTRL_CFG_MASK; + + if (dma_spec->args_count > 2) + 
fchan->cfg.type = dma_spec->args[2]; + + if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) { + fchan->dreq_line = 0; + } else { + fchan->dreq_line = st_fdma_dreq_get(fchan); + if (IS_ERR_VALUE(fchan->dreq_line)) { + chan = ERR_PTR(fchan->dreq_line); + goto err_chan; + } + } + + dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n", + fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl); + + return chan; + +err_chan: + rproc_shutdown(fdev->slim_rproc->rproc); + return chan; + +} + +static void st_fdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct st_fdma_desc *fdesc; + int i; + + fdesc = to_st_fdma_desc(vdesc); + for (i = 0; i < fdesc->n_nodes; i++) + dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc, + fdesc->node[i].pdesc); + kfree(fdesc); +} + +static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan, + int sg_len) +{ + struct st_fdma_desc *fdesc; + int i; + + fdesc = kzalloc(sizeof(*fdesc) + + sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT); + if (!fdesc) + return NULL; + + fdesc->fchan = fchan; + fdesc->n_nodes = sg_len; + for (i = 0; i < sg_len; i++) { + fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool, + GFP_NOWAIT, &fdesc->node[i].pdesc); + if (!fdesc->node[i].desc) + goto err; + } + return fdesc; + +err: + while (--i >= 0) + dma_pool_free(fchan->node_pool, fdesc->node[i].desc, + fdesc->node[i].pdesc); + kfree(fdesc); + return NULL; +} + +static int st_fdma_alloc_chan_res(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + + /* Create the dma pool for descriptor allocation */ + fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device), + fchan->fdev->dev, + sizeof(struct st_fdma_hw_node), + __alignof__(struct st_fdma_hw_node), + 0); + + if (!fchan->node_pool) { + dev_err(fchan->fdev->dev, "unable to allocate desc pool\n"); + return -ENOMEM; + } + + dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n", + fchan->vchan.chan.chan_id, fchan->cfg.type); + + return 0; +} + 
+static void st_fdma_free_chan_res(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + struct rproc *rproc = fchan->fdev->slim_rproc->rproc; + unsigned long flags; + + LIST_HEAD(head); + + dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", + __func__, fchan->vchan.chan.chan_id); + + if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN) + st_fdma_dreq_put(fchan); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + fchan->fdesc = NULL; + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + dma_pool_destroy(fchan->node_pool); + fchan->node_pool = NULL; + memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg)); + + rproc_shutdown(rproc); +} + +static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + struct st_fdma_hw_node *hw_node; + + if (!len) + return NULL; + + fchan = to_st_fdma_chan(chan); + + /* We only require a single descriptor */ + fdesc = st_fdma_alloc_desc(fchan, 1); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + hw_node = fdesc->node[0].desc; + hw_node->next = 0; + hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN; + hw_node->control |= FDMA_NODE_CTRL_SRC_INCR; + hw_node->control |= FDMA_NODE_CTRL_DST_INCR; + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + hw_node->nbytes = len; + hw_node->saddr = src; + hw_node->daddr = dst; + hw_node->generic.length = len; + hw_node->generic.sstride = 0; + hw_node->generic.dstride = 0; + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static int config_reqctrl(struct st_fdma_chan *fchan, + enum dma_transfer_direction direction) +{ + u32 maxburst = 0, addr = 0; + enum dma_slave_buswidth width; + int ch_id = fchan->vchan.chan.chan_id; + struct st_fdma_dev *fdev = fchan->fdev; + + switch (direction) { + + case DMA_DEV_TO_MEM: + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR; + maxburst = 
fchan->scfg.src_maxburst; + width = fchan->scfg.src_addr_width; + addr = fchan->scfg.src_addr; + break; + + case DMA_MEM_TO_DEV: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR; + maxburst = fchan->scfg.dst_maxburst; + width = fchan->scfg.dst_addr_width; + addr = fchan->scfg.dst_addr; + break; + + default: + return -EINVAL; + } + + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK; + + switch (width) { + + case DMA_SLAVE_BUSWIDTH_1_BYTE: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1; + break; + + case DMA_SLAVE_BUSWIDTH_2_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2; + break; + + case DMA_SLAVE_BUSWIDTH_4_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4; + break; + + case DMA_SLAVE_BUSWIDTH_8_BYTES: + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8; + break; + + default: + return -EINVAL; + } + + fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK; + fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1); + dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST); + + fchan->cfg.dev_addr = addr; + fchan->cfg.dir = direction; + + dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n", + ch_id, addr, fchan->cfg.req_ctrl); + + return 0; +} + +static void fill_hw_node(struct st_fdma_hw_node *hw_node, + struct st_fdma_chan *fchan, + enum dma_transfer_direction direction) +{ + if (direction == DMA_MEM_TO_DEV) { + hw_node->control |= FDMA_NODE_CTRL_SRC_INCR; + hw_node->control |= FDMA_NODE_CTRL_DST_STATIC; + hw_node->daddr = fchan->cfg.dev_addr; + } else { + hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC; + hw_node->control |= FDMA_NODE_CTRL_DST_INCR; + hw_node->saddr = fchan->cfg.dev_addr; + } + + hw_node->generic.sstride = 0; + hw_node->generic.dstride = 0; +} + +static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan, + size_t len, enum dma_transfer_direction direction) +{ + struct st_fdma_chan *fchan; + + if (!chan || !len) + return NULL; + + fchan = to_st_fdma_chan(chan); + + if 
(!is_slave_direction(direction)) { + dev_err(fchan->fdev->dev, "bad direction?\n"); + return NULL; + } + + return fchan; +} + +static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + int sg_len, i; + + fchan = st_fdma_prep_common(chan, len, direction); + if (!fchan) + return NULL; + + if (!period_len) + return NULL; + + if (config_reqctrl(fchan, direction)) { + dev_err(fchan->fdev->dev, "bad width or direction\n"); + return NULL; + } + + /* the buffer length must be a multiple of period_len */ + if (len % period_len != 0) { + dev_err(fchan->fdev->dev, "len is not multiple of period\n"); + return NULL; + } + + sg_len = len / period_len; + fdesc = st_fdma_alloc_desc(fchan, sg_len); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + fdesc->iscyclic = true; + + for (i = 0; i < sg_len; i++) { + struct st_fdma_hw_node *hw_node = fdesc->node[i].desc; + + hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc; + + hw_node->control = + FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line); + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + + fill_hw_node(hw_node, fchan, direction); + + if (direction == DMA_MEM_TO_DEV) + hw_node->saddr = buf_addr + (i * period_len); + else + hw_node->daddr = buf_addr + (i * period_len); + + hw_node->nbytes = period_len; + hw_node->generic.length = period_len; + } + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct st_fdma_chan *fchan; + struct st_fdma_desc *fdesc; + struct st_fdma_hw_node *hw_node; + struct scatterlist *sg; + int i; + + fchan = 
st_fdma_prep_common(chan, sg_len, direction); + if (!fchan) + return NULL; + + if (!sgl) + return NULL; + + fdesc = st_fdma_alloc_desc(fchan, sg_len); + if (!fdesc) { + dev_err(fchan->fdev->dev, "no memory for desc\n"); + return NULL; + } + + fdesc->iscyclic = false; + + for_each_sg(sgl, sg, sg_len, i) { + hw_node = fdesc->node[i].desc; + + hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc; + hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line); + + fill_hw_node(hw_node, fchan, direction); + + if (direction == DMA_MEM_TO_DEV) + hw_node->saddr = sg_dma_address(sg); + else + hw_node->daddr = sg_dma_address(sg); + + hw_node->nbytes = sg_dma_len(sg); + hw_node->generic.length = sg_dma_len(sg); + } + + /* interrupt at end of last node */ + hw_node->control |= FDMA_NODE_CTRL_INT_EON; + + return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags); +} + +static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan, + struct virt_dma_desc *vdesc, + bool in_progress) +{ + struct st_fdma_desc *fdesc = fchan->fdesc; + size_t residue = 0; + dma_addr_t cur_addr = 0; + int i; + + if (in_progress) { + cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST); + cur_addr &= FDMA_CH_CMD_DATA_MASK; + } + + for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) { + if (cur_addr == fdesc->node[i].pdesc) { + residue += fnode_read(fchan, FDMA_CNTN_OFST); + break; + } + residue += fdesc->node[i].desc->nbytes; + } + + return residue; +} + +static enum dma_status st_fdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&fchan->vchan.lock, flags); + vd = vchan_find_desc(&fchan->vchan, cookie); + if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie) + txstate->residue = 
st_fdma_desc_residue(fchan, vd, true); + else if (vd) + txstate->residue = st_fdma_desc_residue(fchan, vd, false); + else + txstate->residue = 0; + + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return ret; +} + +static void st_fdma_issue_pending(struct dma_chan *chan) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&fchan->vchan.lock, flags); + + if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc) + st_fdma_xfer_desc(fchan); + + spin_unlock_irqrestore(&fchan->vchan.lock, flags); +} + +static int st_fdma_pause(struct dma_chan *chan) +{ + unsigned long flags; + LIST_HEAD(head); + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + unsigned long cmd = FDMA_CMD_PAUSE(ch_id); + + dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + if (fchan->fdesc) + fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST); + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return 0; +} + +static int st_fdma_resume(struct dma_chan *chan) +{ + unsigned long flags; + unsigned long val; + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + + dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + if (fchan->fdesc) { + val = fchan_read(fchan, FDMA_CH_CMD_OFST); + val &= FDMA_CH_CMD_DATA_MASK; + fchan_write(fchan, val, FDMA_CH_CMD_OFST); + } + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + + return 0; +} + +static int st_fdma_terminate_all(struct dma_chan *chan) +{ + unsigned long flags; + LIST_HEAD(head); + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + int ch_id = fchan->vchan.chan.chan_id; + unsigned long cmd = FDMA_CMD_PAUSE(ch_id); + + dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id); + + spin_lock_irqsave(&fchan->vchan.lock, flags); + fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST); + fchan->fdesc = NULL; + 
 vchan_get_all_descriptors(&fchan->vchan, &head); + spin_unlock_irqrestore(&fchan->vchan.lock, flags); + vchan_dma_desc_free_list(&fchan->vchan, &head); + + return 0; +} + +static int st_fdma_slave_config(struct dma_chan *chan, + struct dma_slave_config *slave_cfg) +{ + struct st_fdma_chan *fchan = to_st_fdma_chan(chan); + + memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg)); + return 0; +} + +static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = { + .name = "STiH407", + .id = 0, +}; + +static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = { + .name = "STiH407", + .id = 1, +}; + +static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = { + .name = "STiH407", + .id = 2, +}; + +static const struct of_device_id st_fdma_match[] = { + { .compatible = "st,stih407-fdma-mpe31-11" + , .data = &fdma_mpe31_stih407_11 }, + { .compatible = "st,stih407-fdma-mpe31-12" + , .data = &fdma_mpe31_stih407_12 }, + { .compatible = "st,stih407-fdma-mpe31-13" + , .data = &fdma_mpe31_stih407_13 }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_fdma_match); + +static int st_fdma_parse_dt(struct platform_device *pdev, + const struct st_fdma_driverdata *drvdata, + struct st_fdma_dev *fdev) +{ + struct device_node *np = pdev->dev.of_node; + int ret = -EINVAL; + + if (!np) + goto err; + + ret = of_property_read_u32(np, "dma-channels", &fdev->nr_channels); + if (ret) + goto err; + + snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf", + drvdata->name, drvdata->id); + +err: + return ret; +} +#define FDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static int st_fdma_probe(struct platform_device *pdev) +{ + struct st_fdma_dev *fdev; + const struct of_device_id *match; + struct device_node *np = pdev->dev.of_node; + const struct st_fdma_driverdata *drvdata; + int ret, i; + + match = of_match_device((st_fdma_match), &pdev->dev); + if (!match || !match->data) { + 
 dev_err(&pdev->dev, "No device match found\n"); + return -ENODEV; + } + + drvdata = match->data; + + fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL); + if (!fdev) + return -ENOMEM; + + ret = st_fdma_parse_dt(pdev, drvdata, fdev); + if (ret) { + dev_err(&pdev->dev, "unable to find platform data\n"); + goto err; + } + + fdev->chans = devm_kzalloc(&pdev->dev, + fdev->nr_channels + * sizeof(struct st_fdma_chan), GFP_KERNEL); + if (!fdev->chans) + return -ENOMEM; + + fdev->dev = &pdev->dev; + fdev->drvdata = drvdata; + platform_set_drvdata(pdev, fdev); + + fdev->irq = platform_get_irq(pdev, 0); + if (fdev->irq < 0) { + dev_err(&pdev->dev, "Failed to get irq resource\n"); + return -EINVAL; + } + + ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0, + dev_name(&pdev->dev), fdev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret); + goto err; + } + + fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name); + if (IS_ERR(fdev->slim_rproc)) { + ret = PTR_ERR(fdev->slim_rproc); + dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret); + goto err; + } + + /* Initialise list of FDMA channels */ + INIT_LIST_HEAD(&fdev->dma_device.channels); + for (i = 0; i < fdev->nr_channels; i++) { + struct st_fdma_chan *fchan = &fdev->chans[i]; + + fchan->fdev = fdev; + fchan->vchan.desc_free = st_fdma_free_desc; + vchan_init(&fchan->vchan, &fdev->dma_device); + } + + /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */ + fdev->dreq_mask = BIT(0) | BIT(31); + + dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask); + dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask); + + fdev->dma_device.dev = &pdev->dev; + fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res; + fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res; + fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic; + fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg; + 
 fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy; + fdev->dma_device.device_tx_status = st_fdma_tx_status; + fdev->dma_device.device_issue_pending = st_fdma_issue_pending; + fdev->dma_device.device_terminate_all = st_fdma_terminate_all; + fdev->dma_device.device_config = st_fdma_slave_config; + fdev->dma_device.device_pause = st_fdma_pause; + fdev->dma_device.device_resume = st_fdma_resume; + + fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS; + fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS; + fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + ret = dma_async_device_register(&fdev->dma_device); + if (ret) { + dev_err(&pdev->dev, + "Failed to register DMA device (%d)\n", ret); + goto err_rproc; + } + + ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev); + if (ret) { + dev_err(&pdev->dev, + "Failed to register controller (%d)\n", ret); + goto err_dma_dev; + } + + dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq); + + return 0; + +err_dma_dev: + dma_async_device_unregister(&fdev->dma_device); +err_rproc: + st_slim_rproc_put(fdev->slim_rproc); +err: + return ret; +} + +static int st_fdma_remove(struct platform_device *pdev) +{ + struct st_fdma_dev *fdev = platform_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, fdev->irq, fdev); + st_slim_rproc_put(fdev->slim_rproc); + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&fdev->dma_device); + + return 0; +} + +static struct platform_driver st_fdma_platform_driver = { + .driver = { + .name = "st-fdma", + .of_match_table = st_fdma_match, + }, + .probe = st_fdma_probe, + .remove = st_fdma_remove, +}; +module_platform_driver(st_fdma_platform_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); +MODULE_AUTHOR("Ludovic.barre <ludovic.barre@st.com>"); +MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); -- 1.9.1