From mboxrd@z Thu Jan  1 00:00:00 1970
From: Stefan Roese
Date: Thu, 23 Mar 2017 17:01:35 +0100
Subject: [U-Boot] [PATCH v2 09/45] net: mvpp2: use "dma" instead of "phys" where appropriate
In-Reply-To: <20170323160211.18072-1-sr@denx.de>
References: <20170323160211.18072-1-sr@denx.de>
Message-ID: <20170323160211.18072-10-sr@denx.de>
List-Id:
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: u-boot@lists.denx.de

From: Thomas Petazzoni

As indicated by Russell King, the mvpp2 driver currently uses a lot of
"phys" or "phys_addr" to store what really is a DMA address. This
commit clarifies this by using "dma" or "dma_addr" where appropriate.

This is especially important as we are going to introduce more changes
where the distinction between physical address and DMA address will be
key.

Signed-off-by: Thomas Petazzoni
Signed-off-by: Stefan Roese
Acked-by: Joe Hershberger
---

Changes in v2:
- Added Acked-by from Joe

 drivers/net/mvpp2.c | 61 +++++++++++++++++++++++++++--------------------------
 1 file changed, 31 insertions(+), 30 deletions(-)

diff --git a/drivers/net/mvpp2.c b/drivers/net/mvpp2.c
index 82d3ff9c35..53ddb330e9 100644
--- a/drivers/net/mvpp2.c
+++ b/drivers/net/mvpp2.c
@@ -799,7 +799,7 @@ struct mvpp2_tx_desc {
 	u8 packet_offset;	/* the offset from the buffer beginning */
 	u8 phys_txq;		/* destination queue ID */
 	u16 data_size;		/* data size of transmitted packet in bytes */
-	u32 buf_phys_addr;	/* physical addr of transmitted buffer */
+	u32 buf_dma_addr;	/* physical addr of transmitted buffer */
 	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
 	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
 	u32 reserved2;		/* reserved (for future use) */
@@ -809,7 +809,7 @@ struct mvpp2_rx_desc {
 	u32 status;		/* info about received packet */
 	u16 reserved1;		/* parser_info (for future use, PnC) */
 	u16 data_size;		/* size of received packet in bytes */
-	u32 buf_phys_addr;	/* physical address of the buffer */
+	u32 buf_dma_addr;	/* physical address of the buffer */
 	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
 	u16 reserved2;		/* gem_port_id (for future use, PON) */
 	u16 reserved3;		/* csum_l4 (for future use, PnC) */
@@ -864,7 +864,7 @@ struct mvpp2_tx_queue {
 	struct mvpp2_tx_desc *descs;
 
 	/* DMA address of the Tx DMA descriptors array */
-	dma_addr_t descs_phys;
+	dma_addr_t descs_dma;
 
 	/* Index of the last Tx DMA descriptor */
 	int last_desc;
@@ -887,7 +887,7 @@ struct mvpp2_rx_queue {
 	struct mvpp2_rx_desc *descs;
 
 	/* DMA address of the RX DMA descriptors array */
-	dma_addr_t descs_phys;
+	dma_addr_t descs_dma;
 
 	/* Index of the last RX DMA descriptor */
 	int last_desc;
@@ -960,8 +960,8 @@ struct mvpp2_bm_pool {
 
 	/* BPPE virtual base address */
 	unsigned long *virt_addr;
-	/* BPPE physical base address */
-	dma_addr_t phys_addr;
+	/* BPPE DMA base address */
+	dma_addr_t dma_addr;
 
 	/* Ports using BM pool */
 	u32 port_map;
@@ -971,7 +971,7 @@ struct mvpp2_bm_pool {
 };
 
 struct mvpp2_buff_hdr {
-	u32 next_buff_phys_addr;
+	u32 next_buff_dma_addr;
 	u32 next_buff_virt_addr;
 	u16 byte_count;
 	u16 info;
@@ -2215,7 +2215,7 @@ static int mvpp2_bm_pool_create(struct udevice *dev,
 	u32 val;
 
 	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
-	bm_pool->phys_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
+	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
 	if (!bm_pool->virt_addr)
 		return -ENOMEM;
 
@@ -2227,7 +2227,7 @@ static int mvpp2_bm_pool_create(struct udevice *dev,
 	}
 
 	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
-		    bm_pool->phys_addr);
+		    bm_pool->dma_addr);
 	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
 
 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
@@ -2367,20 +2367,21 @@ static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
 
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
-				     dma_addr_t buf_phys_addr,
+				     dma_addr_t buf_dma_addr,
 				     unsigned long buf_virt_addr)
 {
 	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
-	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 }
 
 /* Refill BM pool */
 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
-			      u32 phys_addr, u32 cookie)
+			      dma_addr_t dma_addr,
+			      u32 cookie)
 {
 	int pool = mvpp2_bm_cookie_pool_get(bm);
 
-	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+	mvpp2_bm_pool_put(port, pool, dma_addr, cookie);
 }
 
 /* Allocate buffers for the pool */
@@ -2944,7 +2945,7 @@ static int mvpp2_aggr_txq_init(struct udevice *dev,
 {
 	/* Allocate memory for TX descriptors */
 	aggr_txq->descs = buffer_loc.aggr_tx_descs;
-	aggr_txq->descs_phys = (dma_addr_t)buffer_loc.aggr_tx_descs;
+	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
 	if (!aggr_txq->descs)
 		return -ENOMEM;
 
@@ -2961,7 +2962,7 @@ static int mvpp2_aggr_txq_init(struct udevice *dev,
 	/* Set Tx descriptors queue starting address */
 	/* indirect access */
 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
-		    aggr_txq->descs_phys);
+		    aggr_txq->descs_dma);
 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
 
 	return 0;
@@ -2976,7 +2977,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
 
 	/* Allocate memory for RX descriptors */
 	rxq->descs = buffer_loc.rx_descs;
-	rxq->descs_phys = (dma_addr_t)buffer_loc.rx_descs;
+	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
 	if (!rxq->descs)
 		return -ENOMEM;
 
@@ -2990,7 +2991,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
 
 	/* Set Rx descriptors queue starting address - indirect access */
 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
-	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
 	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
 	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
 
@@ -3017,7 +3018,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 		u32 bm = mvpp2_bm_cookie_build(rx_desc);
 
-		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+		mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
 				  rx_desc->buf_cookie);
 	}
 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
@@ -3032,7 +3033,7 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
 	rxq->descs = NULL;
 	rxq->last_desc = 0;
 	rxq->next_desc_to_proc = 0;
-	rxq->descs_phys = 0;
+	rxq->descs_dma = 0;
 
 	/* Clear Rx descriptors queue starting address and size;
 	 * free descriptor number
@@ -3055,7 +3056,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 
 	/* Allocate memory for Tx descriptors */
 	txq->descs = buffer_loc.tx_descs;
-	txq->descs_phys = (dma_addr_t)buffer_loc.tx_descs;
+	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
 	if (!txq->descs)
 		return -ENOMEM;
 
@@ -3067,7 +3068,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 
 	/* Set Tx descriptors queue starting address - indirect access */
 	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
 	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG,
 		    txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
 	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
@@ -3119,7 +3120,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 	txq->descs = NULL;
 	txq->last_desc = 0;
 	txq->next_desc_to_proc = 0;
-	txq->descs_phys = 0;
+	txq->descs_dma = 0;
 
 	/* Set minimum bandwidth for disabled TXQs */
 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
@@ -3333,9 +3334,9 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
 static int mvpp2_rx_refill(struct mvpp2_port *port,
 			   struct mvpp2_bm_pool *bm_pool,
-			   u32 bm, u32 phys_addr)
+			   u32 bm, dma_addr_t dma_addr)
 {
-	mvpp2_pool_refill(port, bm, phys_addr, (unsigned long)phys_addr);
+	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
 	return 0;
 }
 
@@ -3854,7 +3855,7 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
 	struct mvpp2_port *port = dev_get_priv(dev);
 	struct mvpp2_rx_desc *rx_desc;
 	struct mvpp2_bm_pool *bm_pool;
-	dma_addr_t phys_addr;
+	dma_addr_t dma_addr;
 	u32 bm, rx_status;
 	int pool, rx_bytes, err;
 	int rx_received;
@@ -3885,7 +3886,7 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
 	rx_desc = mvpp2_rxq_next_desc_get(rxq);
 	rx_status = rx_desc->status;
 	rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
-	phys_addr = rx_desc->buf_phys_addr;
+	dma_addr = rx_desc->buf_dma_addr;
 
 	bm = mvpp2_bm_cookie_build(rx_desc);
 	pool = mvpp2_bm_cookie_pool_get(bm);
@@ -3903,12 +3904,12 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
 	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
 		mvpp2_rx_error(port, rx_desc);
 		/* Return the buffer to the pool */
-		mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+		mvpp2_pool_refill(port, bm, rx_desc->buf_dma_addr,
 				  rx_desc->buf_cookie);
 		return 0;
 	}
 
-	err = mvpp2_rx_refill(port, bm_pool, bm, phys_addr);
+	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
 	if (err) {
 		netdev_err(port->dev, "failed to refill BM pools\n");
 		return 0;
@@ -3919,7 +3920,7 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
 	mvpp2_rxq_status_update(port, rxq->id, 1, 1);
 
 	/* give packet to stack - skip on first n bytes */
-	data = (u8 *)phys_addr + 2 + 32;
+	data = (u8 *)dma_addr + 2 + 32;
 
 	if (rx_bytes <= 0)
 		return 0;
@@ -3964,7 +3965,7 @@ static int mvpp2_send(struct udevice *dev, void *packet, int length)
 	tx_desc->phys_txq = txq->id;
 	tx_desc->data_size = length;
 	tx_desc->packet_offset = (unsigned long)packet & MVPP2_TX_DESC_ALIGN;
-	tx_desc->buf_phys_addr = (unsigned long)packet & ~MVPP2_TX_DESC_ALIGN;
+	tx_desc->buf_dma_addr = (unsigned long)packet & ~MVPP2_TX_DESC_ALIGN;
 	/* First and Last descriptor */
 	tx_desc->command = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
 		MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
-- 
2.12.1
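
Editorial note (not part of the patch): a brief, standalone illustration of the
naming discipline the rename establishes. Values programmed into the
controller's descriptor and queue registers are DMA (bus) addresses, while
cookies and pointers handled by the CPU are virtual addresses. The struct and
function names below are hypothetical, dma_addr_t is typedef'd locally so the
sketch compiles on its own, and an identity mapping (DMA address numerically
equal to the CPU address, as on targets without an IOMMU) is assumed.

/* Standalone sketch: keep device-visible and CPU-side addresses apart. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;	/* address as seen by the NIC's DMA engine */

struct demo_rx_desc {
	uint16_t data_size;	/* packet length in bytes */
	uint32_t buf_dma_addr;	/* consumed by hardware: must be a DMA address */
	uint32_t buf_cookie;	/* software-only reference, e.g. CPU address */
};

static void demo_fill_desc(struct demo_rx_desc *desc, void *cpu_buf,
			   dma_addr_t bus_addr, uint16_t len)
{
	desc->data_size = len;
	desc->buf_dma_addr = bus_addr;			/* device-visible */
	desc->buf_cookie = (uint32_t)(uintptr_t)cpu_buf;	/* CPU-side only */
}

int main(void)
{
	static uint8_t buf[2048];
	struct demo_rx_desc d;

	/* Identity mapping assumed for the demo: DMA address == CPU address */
	demo_fill_desc(&d, buf, (dma_addr_t)(uintptr_t)buf, sizeof(buf));
	printf("dma=0x%08x cookie=0x%08x size=%u\n",
	       d.buf_dma_addr, d.buf_cookie, (unsigned int)d.data_size);
	return 0;
}

Keeping the two kinds of addresses in distinctly named fields makes it obvious
which value may be handed to the hardware, even on platforms where the numbers
happen to coincide.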